#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
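# Illustrative example only (not taken from the callbacks themselves): a value
# accepted by the check above is either None or a list of two-item tuples,
# e.g. [("some change description", None)] -- a non-empty string paired with
# an arbitrary value.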


def _DeviceHotplugable(dev):
  """Check whether a device can be hot-plugged (it must have a UUID).

  """
  return dev.uuid is not None


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
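  # For illustration (hypothetical names): if "inst1" resolves to
  # "inst1.example.com", the prefix matches and the resolved hostname object
  # is returned; a given name that is not a prefix of the resolved one raises
  # OpPrereqError instead.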


def _CheckOpportunisticLocking(op):
  """Raise an error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
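  # Worked example (hypothetical values): with cluster defaults such as
  # {"vcpus": 1, "maxmem": 128} and op.beparams {"vcpus": "auto"}, the loop
  # above replaces "auto" with the default 1, and SimpleFillBE then merges in
  # the remaining defaults, so the returned dict carries both values.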


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built-up NICs

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
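  # Sketch of the mapping performed above (values are only illustrative): an
  # input dict such as {"ip": "pool", "network": "net1"} becomes an
  # objects.NIC with a freshly generated UUID, the cluster-default NIC mode
  # and the IP left as "pool"; the actual address (and an "auto" MAC) is only
  # filled in later, in LUInstanceCreate.CheckPrereq.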


def _CheckForConflictingIp(lu, ip, node_uuid):
  """Raise an error in case of a conflicting IP address.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute whether the instance spec meets the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
502
    """ExpandNames for CreateInstance.
503

504
    Figure out the right locks for instance creation.
505

506
    """
507
    self.needed_locks = {}
508

    
509
    # this is just a preventive check, but someone might still add this
510
    # instance in the meantime, and creation will fail at lock-add time
511
    if self.op.instance_name in\
512
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
513
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
514
                                 self.op.instance_name, errors.ECODE_EXISTS)
515

    
516
    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
517

    
518
    if self.op.iallocator:
519
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
520
      # specifying a group on instance creation and then selecting nodes from
521
      # that group
522
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
523
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
524

    
525
      if self.op.opportunistic_locking:
526
        self.opportunistic_locks[locking.LEVEL_NODE] = True
527
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
528
    else:
529
      (self.op.pnode_uuid, self.op.pnode) = \
530
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
531
      nodelist = [self.op.pnode_uuid]
532
      if self.op.snode is not None:
533
        (self.op.snode_uuid, self.op.snode) = \
534
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
535
        nodelist.append(self.op.snode_uuid)
536
      self.needed_locks[locking.LEVEL_NODE] = nodelist
537

    
538
    # in case of import lock the source node too
539
    if self.op.mode == constants.INSTANCE_IMPORT:
540
      src_node = self.op.src_node
541
      src_path = self.op.src_path
542

    
543
      if src_path is None:
544
        self.op.src_path = src_path = self.op.instance_name
545

    
546
      if src_node is None:
547
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
548
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
549
        self.op.src_node = None
550
        if os.path.isabs(src_path):
551
          raise errors.OpPrereqError("Importing an instance from a path"
552
                                     " requires a source node option",
553
                                     errors.ECODE_INVAL)
554
      else:
555
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
556
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
557
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
558
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
559
        if not os.path.isabs(src_path):
560
          self.op.src_path = src_path = \
561
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
562

    
563
    self.needed_locks[locking.LEVEL_NODE_RES] = \
564
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
565

    
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
611
    """Build hooks env.
612

613
    This runs on master, primary and secondary nodes of the instance.
614

615
    """
616
    env = {
617
      "ADD_MODE": self.op.mode,
618
      }
619
    if self.op.mode == constants.INSTANCE_IMPORT:
620
      env["SRC_NODE"] = self.op.src_node
621
      env["SRC_PATH"] = self.op.src_path
622
      env["SRC_IMAGES"] = self.src_images
623

    
624
    env.update(BuildInstanceHookEnv(
625
      name=self.op.instance_name,
626
      primary_node_name=self.op.pnode,
627
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
628
      status=self.op.start,
629
      os_type=self.op.os_type,
630
      minmem=self.be_full[constants.BE_MINMEM],
631
      maxmem=self.be_full[constants.BE_MAXMEM],
632
      vcpus=self.be_full[constants.BE_VCPUS],
633
      nics=NICListToTuple(self, self.nics),
634
      disk_template=self.op.disk_template,
635
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
636
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
637
             for d in self.disks],
638
      bep=self.be_full,
639
      hvp=self.hv_full,
640
      hypervisor_name=self.op.hypervisor,
641
      tags=self.op.tags,
642
      ))
643

    
644
    return env
645

    
646
  def BuildHooksNodes(self):
647
    """Build hooks nodes.
648

649
    """
650
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
651
    return nl, nl
652

    
  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if self.op.src_path in exp_list[node].payload:
          found = True
          self.op.src_node = node
          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
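      # For example (hypothetical paths): with a cluster storage dir of
      # "/srv/ganeti/file-storage", op.file_storage_dir "mysubdir" and
      # instance "inst1.example.com", the result is
      # "/srv/ganeti/file-storage/mysubdir/inst1.example.com".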

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
1245
        result.Raise("Failed to rename adoped LVs")
1246
    else:
1247
      feedback_fn("* creating instance disks...")
1248
      try:
1249
        CreateDisks(self, iobj)
1250
      except errors.OpExecError:
1251
        self.LogWarning("Device creation failed")
1252
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1253
        raise
1254

    
1255
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1256

    
1257
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1258

    
1259
    # Declare that we don't want to remove the instance lock anymore, as we've
1260
    # added the instance to the config
1261
    del self.remove_locks[locking.LEVEL_INSTANCE]
1262

    
1263
    if self.op.mode == constants.INSTANCE_IMPORT:
1264
      # Release unused nodes
1265
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1266
    else:
1267
      # Release all nodes
1268
      ReleaseLocks(self, locking.LEVEL_NODE)
1269

    
1270
    disk_abort = False
1271
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1272
      feedback_fn("* wiping instance disks...")
1273
      try:
1274
        WipeDisks(self, iobj)
1275
      except errors.OpExecError, err:
1276
        logging.exception("Wiping disks failed")
1277
        self.LogWarning("Wiping instance disks failed (%s)", err)
1278
        disk_abort = True
1279

    
1280
    if disk_abort:
1281
      # Something is already wrong with the disks, don't do anything else
1282
      pass
1283
    elif self.op.wait_for_sync:
1284
      disk_abort = not WaitForSync(self, iobj)
1285
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1286
      # make sure the disks are not degraded (still sync-ing is ok)
1287
      feedback_fn("* checking mirrors status")
1288
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1289
    else:
1290
      disk_abort = False
1291

    
1292
    if disk_abort:
1293
      RemoveDisks(self, iobj)
1294
      self.cfg.RemoveInstance(iobj.uuid)
1295
      # Make sure the instance lock gets removed
1296
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1297
      raise errors.OpExecError("There are some degraded disks for"
1298
                               " this instance")
1299

    
1300
    # instance disks are now active
1301
    iobj.disks_active = True
1302

    
1303
    # Release all node resource locks
1304
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1305

    
1306
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1307
      # we need to set the disks ID to the primary node, since the
1308
      # preceding code might or might have not done it, depending on
1309
      # disk template and other options
1310
      for disk in iobj.disks:
1311
        self.cfg.SetDiskID(disk, self.pnode.uuid)
1312
      if self.op.mode == constants.INSTANCE_CREATE:
1313
        if not self.op.no_install:
1314
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1315
                        not self.op.wait_for_sync)
1316
          if pause_sync:
1317
            feedback_fn("* pausing disk sync to install instance OS")
1318
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1319
                                                              (iobj.disks,
1320
                                                               iobj), True)
1321
            for idx, success in enumerate(result.payload):
1322
              if not success:
1323
                logging.warn("pause-sync of instance %s for disk %d failed",
1324
                             self.op.instance_name, idx)
1325

    
1326
          feedback_fn("* running the instance OS create scripts...")
1327
          # FIXME: pass debug option from opcode to backend
1328
          os_add_result = \
1329
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1330
                                          self.op.debug_level)
1331
          if pause_sync:
1332
            feedback_fn("* resuming disk sync")
1333
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1334
                                                              (iobj.disks,
1335
                                                               iobj), False)
1336
            for idx, success in enumerate(result.payload):
1337
              if not success:
1338
                logging.warn("resume-sync of instance %s for disk %d failed",
1339
                             self.op.instance_name, idx)
1340

    
1341
          os_add_result.Raise("Could not add os for instance %s"
1342
                              " on node %s" % (self.op.instance_name,
1343
                                               self.pnode.name))
1344

    
1345
      else:
1346
        if self.op.mode == constants.INSTANCE_IMPORT:
1347
          feedback_fn("* running the instance OS import scripts...")
1348

    
1349
          transfers = []
1350

    
1351
          for idx, image in enumerate(self.src_images):
1352
            if not image:
1353
              continue
1354

    
1355
            # FIXME: pass debug option from opcode to backend
1356
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1357
                                               constants.IEIO_FILE, (image, ),
1358
                                               constants.IEIO_SCRIPT,
1359
                                               (iobj.disks[idx], idx),
1360
                                               None)
1361
            transfers.append(dt)
1362

    
1363
          import_result = \
1364
            masterd.instance.TransferInstanceData(self, feedback_fn,
1365
                                                  self.op.src_node_uuid,
1366
                                                  self.pnode.uuid,
1367
                                                  self.pnode.secondary_ip,
1368
                                                  iobj, transfers)
1369
          if not compat.all(import_result):
1370
            self.LogWarning("Some disks for instance %s on node %s were not"
1371
                            " imported successfully" % (self.op.instance_name,
1372
                                                        self.pnode.name))
1373

    
1374
          rename_from = self._old_instance_name
1375

    
1376
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1377
          feedback_fn("* preparing remote import...")
1378
          # The source cluster will stop the instance before attempting to make
1379
          # a connection. In some cases stopping an instance can take a long
1380
          # time, hence the shutdown timeout is added to the connection
1381
          # timeout.
1382
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1383
                             self.op.source_shutdown_timeout)
1384
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1385

    
1386
          assert iobj.primary_node == self.pnode.uuid
1387
          disk_results = \
1388
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1389
                                          self.source_x509_ca,
1390
                                          self._cds, timeouts)
1391
          if not compat.all(disk_results):
1392
            # TODO: Should the instance still be started, even if some disks
1393
            # failed to import (valid for local imports, too)?
1394
            self.LogWarning("Some disks for instance %s on node %s were not"
1395
                            " imported successfully" % (self.op.instance_name,
1396
                                                        self.pnode.name))
1397

    
1398
          rename_from = self.source_instance_name
1399

    
1400
        else:
1401
          # also checked in the prereq part
1402
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1403
                                       % self.op.mode)
1404

    
1405
        # Run rename script on newly imported instance
1406
        assert iobj.name == self.op.instance_name
1407
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1408
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1409
                                                   rename_from,
1410
                                                   self.op.debug_level)
1411
        result.Warn("Failed to run rename script for %s on node %s" %
1412
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1413

    
1414
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1415

    
1416
    if self.op.start:
1417
      iobj.admin_state = constants.ADMINST_UP
1418
      self.cfg.Update(iobj, feedback_fn)
1419
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1420
                   self.pnode.name)
1421
      feedback_fn("* starting instance...")
1422
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1423
                                            False, self.op.reason)
1424
      result.Raise("Could not start instance")
1425

    
1426
    return list(iobj.all_nodes)
1427

    
1428

    
1429
class LUInstanceRename(LogicalUnit):
1430
  """Rename an instance.
1431

1432
  """
1433
  HPATH = "instance-rename"
1434
  HTYPE = constants.HTYPE_INSTANCE
1435

    
1436
  def CheckArguments(self):
1437
    """Check arguments.
1438

1439
    """
1440
    if self.op.ip_check and not self.op.name_check:
1441
      # TODO: make the ip check more flexible and not depend on the name check
1442
      raise errors.OpPrereqError("IP address check requires a name check",
1443
                                 errors.ECODE_INVAL)
1444

    
1445
  def BuildHooksEnv(self):
1446
    """Build hooks env.
1447

1448
    This runs on master, primary and secondary nodes of the instance.
1449

1450
    """
1451
    env = BuildInstanceHookEnvByObject(self, self.instance)
1452
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1453
    return env
1454

    
1455
  def BuildHooksNodes(self):
1456
    """Build hooks nodes.
1457

1458
    """
1459
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1460
    return (nl, nl)
1461

    
1462
  def CheckPrereq(self):
1463
    """Check prerequisites.
1464

1465
    This checks that the instance is in the cluster and is not running.
1466

1467
    """
1468
    (self.op.instance_uuid, self.op.instance_name) = \
1469
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1470
                                self.op.instance_name)
1471
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1472
    assert instance is not None
1473
    CheckNodeOnline(self, instance.primary_node)
1474
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1475
                       msg="cannot rename")
1476
    self.instance = instance
1477

    
1478
    new_name = self.op.new_name
1479
    if self.op.name_check:
1480
      hostname = _CheckHostnameSane(self, new_name)
1481
      new_name = self.op.new_name = hostname.name
1482
      if (self.op.ip_check and
1483
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1484
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1485
                                   (hostname.ip, new_name),
1486
                                   errors.ECODE_NOTUNIQUE)
1487

    
1488
    instance_names = [inst.name for
1489
                      inst in self.cfg.GetAllInstancesInfo().values()]
1490
    if new_name in instance_names and new_name != instance.name:
1491
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1492
                                 new_name, errors.ECODE_EXISTS)
1493

    
1494
  def Exec(self, feedback_fn):
1495
    """Rename the instance.
1496

1497
    """
1498
    old_name = self.instance.name
1499

    
1500
    rename_file_storage = False
1501
    if (self.instance.disk_template in constants.DTS_FILEBASED and
1502
        self.op.new_name != self.instance.name):
1503
      old_file_storage_dir = os.path.dirname(
1504
                               self.instance.disks[0].logical_id[1])
1505
      rename_file_storage = True
1506

    
1507
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1508
    # Change the instance lock. This is definitely safe while we hold the BGL.
1509
    # Otherwise the new lock would have to be added in acquired mode.
1510
    assert self.REQ_BGL
1511
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1512
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1513
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1514

    
1515
    # re-read the instance from the configuration after rename
1516
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1517

    
1518
    if rename_file_storage:
1519
      new_file_storage_dir = os.path.dirname(
1520
                               renamed_inst.disks[0].logical_id[1])
1521
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1522
                                                     old_file_storage_dir,
1523
                                                     new_file_storage_dir)
1524
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1525
                   " (but the instance has been renamed in Ganeti)" %
1526
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1527
                    old_file_storage_dir, new_file_storage_dir))
1528

    
1529
    StartInstanceDisks(self, renamed_inst, None)
1530
    # update info on disks
1531
    info = GetInstanceInfoText(renamed_inst)
1532
    for (idx, disk) in enumerate(renamed_inst.disks):
1533
      for node_uuid in renamed_inst.all_nodes:
1534
        self.cfg.SetDiskID(disk, node_uuid)
1535
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
1536
        result.Warn("Error setting info on node %s for disk %s" %
1537
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1538
    try:
1539
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1540
                                                 renamed_inst, old_name,
1541
                                                 self.op.debug_level)
1542
      result.Warn("Could not run OS rename script for instance %s on node %s"
1543
                  " (but the instance has been renamed in Ganeti)" %
1544
                  (renamed_inst.name,
1545
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1546
                  self.LogWarning)
1547
    finally:
1548
      ShutdownInstanceDisks(self, renamed_inst)
1549

    
1550
    return renamed_inst.name
1551

    
1552

    
1553
class LUInstanceRemove(LogicalUnit):
1554
  """Remove an instance.
1555

1556
  """
1557
  HPATH = "instance-remove"
1558
  HTYPE = constants.HTYPE_INSTANCE
1559
  REQ_BGL = False
1560

    
1561
  def ExpandNames(self):
1562
    self._ExpandAndLockInstance()
1563
    self.needed_locks[locking.LEVEL_NODE] = []
1564
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1565
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1566

    
1567
  def DeclareLocks(self, level):
1568
    if level == locking.LEVEL_NODE:
1569
      self._LockInstancesNodes()
1570
    elif level == locking.LEVEL_NODE_RES:
1571
      # Copy node locks
1572
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1573
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1574

    
1575
  def BuildHooksEnv(self):
1576
    """Build hooks env.
1577

1578
    This runs on master, primary and secondary nodes of the instance.
1579

1580
    """
1581
    env = BuildInstanceHookEnvByObject(self, self.instance)
1582
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1583
    return env
1584

    
1585
  def BuildHooksNodes(self):
1586
    """Build hooks nodes.
1587

1588
    """
1589
    nl = [self.cfg.GetMasterNode()]
1590
    nl_post = list(self.instance.all_nodes) + nl
1591
    return (nl, nl_post)
1592

    
1593
  def CheckPrereq(self):
1594
    """Check prerequisites.
1595

1596
    This checks that the instance is in the cluster.
1597

1598
    """
1599
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1600
    assert self.instance is not None, \
1601
      "Cannot retrieve locked instance %s" % self.op.instance_name
1602

    
1603
  def Exec(self, feedback_fn):
1604
    """Remove the instance.
1605

1606
    """
1607
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1608
                 self.cfg.GetNodeName(self.instance.primary_node))
1609

    
1610
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1611
                                             self.instance,
1612
                                             self.op.shutdown_timeout,
1613
                                             self.op.reason)
1614
    if self.op.ignore_failures:
1615
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1616
    else:
1617
      result.Raise("Could not shutdown instance %s on node %s" %
1618
                   (self.instance.name,
1619
                    self.cfg.GetNodeName(self.instance.primary_node)))
1620

    
1621
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1622
            self.owned_locks(locking.LEVEL_NODE_RES))
1623
    assert not (set(self.instance.all_nodes) -
1624
                self.owned_locks(locking.LEVEL_NODE)), \
1625
      "Not owning correct locks"
1626

    
1627
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1628

    
1629

    
1630
class LUInstanceMove(LogicalUnit):
1631
  """Move an instance by data-copying.
1632

1633
  """
1634
  HPATH = "instance-move"
1635
  HTYPE = constants.HTYPE_INSTANCE
1636
  REQ_BGL = False
1637

    
1638
  def ExpandNames(self):
1639
    self._ExpandAndLockInstance()
1640
    (self.op.target_node_uuid, self.op.target_node) = \
1641
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1642
                            self.op.target_node)
1643
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
1644
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1645
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1646

    
1647
  def DeclareLocks(self, level):
1648
    if level == locking.LEVEL_NODE:
1649
      self._LockInstancesNodes(primary_only=True)
1650
    elif level == locking.LEVEL_NODE_RES:
1651
      # Copy node locks
1652
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1653
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1654

    
1655
  def BuildHooksEnv(self):
1656
    """Build hooks env.
1657

1658
    This runs on master, primary and secondary nodes of the instance.
1659

1660
    """
1661
    env = {
1662
      "TARGET_NODE": self.op.target_node,
1663
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1664
      }
1665
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1666
    return env
1667

    
1668
  def BuildHooksNodes(self):
1669
    """Build hooks nodes.
1670

1671
    """
1672
    nl = [
1673
      self.cfg.GetMasterNode(),
1674
      self.instance.primary_node,
1675
      self.op.target_node_uuid,
1676
      ]
1677
    return (nl, nl)
1678

    
1679
  def CheckPrereq(self):
1680
    """Check prerequisites.
1681

1682
    This checks that the instance is in the cluster.
1683

1684
    """
1685
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1686
    assert self.instance is not None, \
1687
      "Cannot retrieve locked instance %s" % self.op.instance_name
1688

    
1689
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1690
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1691
                                 self.instance.disk_template,
1692
                                 errors.ECODE_STATE)
1693

    
1694
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1695
    assert target_node is not None, \
1696
      "Cannot retrieve locked node %s" % self.op.target_node
1697

    
1698
    self.target_node_uuid = target_node.uuid
1699
    if target_node.uuid == self.instance.primary_node:
1700
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1701
                                 (self.instance.name, target_node.name),
1702
                                 errors.ECODE_STATE)
1703

    
1704
    bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1705

    
1706
    for idx, dsk in enumerate(self.instance.disks):
1707
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1708
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1709
                                   " cannot copy" % idx, errors.ECODE_STATE)
1710

    
1711
    CheckNodeOnline(self, target_node.uuid)
1712
    CheckNodeNotDrained(self, target_node.uuid)
1713
    CheckNodeVmCapable(self, target_node.uuid)
1714
    cluster = self.cfg.GetClusterInfo()
1715
    group_info = self.cfg.GetNodeGroup(target_node.group)
1716
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1717
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1718
                           ignore=self.op.ignore_ipolicy)
1719

    
1720
    if self.instance.admin_state == constants.ADMINST_UP:
1721
      # check memory requirements on the secondary node
1722
      CheckNodeFreeMemory(
1723
          self, target_node.uuid, "failing over instance %s" %
1724
          self.instance.name, bep[constants.BE_MAXMEM],
1725
          self.instance.hypervisor,
1726
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1727
    else:
1728
      self.LogInfo("Not checking memory on the secondary node as"
1729
                   " instance will not be started")
1730

    
1731
    # check bridge existance
1732
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1733

    
1734
  def Exec(self, feedback_fn):
1735
    """Move an instance.
1736

1737
    The move is done by shutting it down on its present node, copying
1738
    the data over (slow) and starting it on the new node.
1739

1740
    """
1741
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1742
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1743

    
1744
    self.LogInfo("Shutting down instance %s on source node %s",
1745
                 self.instance.name, source_node.name)
1746

    
1747
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1748
            self.owned_locks(locking.LEVEL_NODE_RES))
1749

    
1750
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1751
                                             self.op.shutdown_timeout,
1752
                                             self.op.reason)
1753
    if self.op.ignore_consistency:
1754
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1755
                  " anyway. Please make sure node %s is down. Error details" %
1756
                  (self.instance.name, source_node.name, source_node.name),
1757
                  self.LogWarning)
1758
    else:
1759
      result.Raise("Could not shutdown instance %s on node %s" %
1760
                   (self.instance.name, source_node.name))
1761

    
1762
    # create the target disks
1763
    try:
1764
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1765
    except errors.OpExecError:
1766
      self.LogWarning("Device creation failed")
1767
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1768
      raise
1769

    
1770
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1771

    
1772
    errs = []
1773
    # activate, get path, copy the data over
1774
    for idx, disk in enumerate(self.instance.disks):
1775
      self.LogInfo("Copying data for disk %d", idx)
1776
      result = self.rpc.call_blockdev_assemble(
1777
                 target_node.uuid, (disk, self.instance), self.instance.name,
1778
                 True, idx)
1779
      if result.fail_msg:
1780
        self.LogWarning("Can't assemble newly created disk %d: %s",
1781
                        idx, result.fail_msg)
1782
        errs.append(result.fail_msg)
1783
        break
1784
      dev_path = result.payload
1785
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1786
                                                                self.instance),
1787
                                             target_node.name, dev_path,
1788
                                             cluster_name)
1789
      if result.fail_msg:
1790
        self.LogWarning("Can't copy data over for disk %d: %s",
1791
                        idx, result.fail_msg)
1792
        errs.append(result.fail_msg)
1793
        break
1794

    
1795
    if errs:
1796
      self.LogWarning("Some disks failed to copy, aborting")
1797
      try:
1798
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1799
      finally:
1800
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1801
        raise errors.OpExecError("Errors during disk copy: %s" %
1802
                                 (",".join(errs),))
1803

    
1804
    self.instance.primary_node = target_node.uuid
1805
    self.cfg.Update(self.instance, feedback_fn)
1806

    
1807
    self.LogInfo("Removing the disks on the original node")
1808
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1809

    
1810
    # Only start the instance if it's marked as up
1811
    if self.instance.admin_state == constants.ADMINST_UP:
1812
      self.LogInfo("Starting instance %s on node %s",
1813
                   self.instance.name, target_node.name)
1814

    
1815
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1816
                                          ignore_secondaries=True)
1817
      if not disks_ok:
1818
        ShutdownInstanceDisks(self, self.instance)
1819
        raise errors.OpExecError("Can't activate the instance's disks")
1820

    
1821
      result = self.rpc.call_instance_start(target_node.uuid,
1822
                                            (self.instance, None, None), False,
1823
                                            self.op.reason)
1824
      msg = result.fail_msg
1825
      if msg:
1826
        ShutdownInstanceDisks(self, self.instance)
1827
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1828
                                 (self.instance.name, target_node.name, msg))
1829

    
1830

    
1831
class LUInstanceMultiAlloc(NoHooksLU):
1832
  """Allocates multiple instances at the same time.
1833

1834
  """
1835
  REQ_BGL = False
1836

    
1837
  def CheckArguments(self):
1838
    """Check arguments.
1839

1840
    """
1841
    nodes = []
1842
    for inst in self.op.instances:
1843
      if inst.iallocator is not None:
1844
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1845
                                   " instance objects", errors.ECODE_INVAL)
1846
      nodes.append(bool(inst.pnode))
1847
      if inst.disk_template in constants.DTS_INT_MIRROR:
1848
        nodes.append(bool(inst.snode))
1849

    
1850
    has_nodes = compat.any(nodes)
1851
    if compat.all(nodes) ^ has_nodes:
1852
      raise errors.OpPrereqError("There are instance objects providing"
1853
                                 " pnode/snode while others do not",
1854
                                 errors.ECODE_INVAL)
1855

    
1856
    if self.op.iallocator is None:
1857
      default_iallocator = self.cfg.GetDefaultIAllocator()
1858
      if default_iallocator and has_nodes:
1859
        self.op.iallocator = default_iallocator
1860
      else:
1861
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1862
                                   " given and no cluster-wide default"
1863
                                   " iallocator found; please specify either"
1864
                                   " an iallocator or nodes on the instances"
1865
                                   " or set a cluster-wide default iallocator",
1866
                                   errors.ECODE_INVAL)
1867

    
1868
    _CheckOpportunisticLocking(self.op)
1869

    
1870
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1871
    if dups:
1872
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1873
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1874

    
1875
  def ExpandNames(self):
1876
    """Calculate the locks.
1877

1878
    """
1879
    self.share_locks = ShareAll()
1880
    self.needed_locks = {
1881
      # iallocator will select nodes and even if no iallocator is used,
1882
      # collisions with LUInstanceCreate should be avoided
1883
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1884
      }
1885

    
1886
    if self.op.iallocator:
1887
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1888
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1889

    
1890
      if self.op.opportunistic_locking:
1891
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1892
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1893
    else:
1894
      nodeslist = []
1895
      for inst in self.op.instances:
1896
        (inst.pnode_uuid, inst.pnode) = \
1897
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1898
        nodeslist.append(inst.pnode)
1899
        if inst.snode is not None:
1900
          (inst.snode_uuid, inst.snode) = \
1901
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1902
          nodeslist.append(inst.snode)
1903

    
1904
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1905
      # Lock resources of instance's primary and secondary nodes (copy to
1906
      # prevent accidential modification)
1907
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1908

    
1909
  def CheckPrereq(self):
1910
    """Check prerequisite.
1911

1912
    """
1913
    cluster = self.cfg.GetClusterInfo()
1914
    default_vg = self.cfg.GetVGName()
1915
    ec_id = self.proc.GetECId()
1916

    
1917
    if self.op.opportunistic_locking:
1918
      # Only consider nodes for which a lock is held
1919
      node_whitelist = self.cfg.GetNodeNames(
1920
                         list(self.owned_locks(locking.LEVEL_NODE)))
1921
    else:
1922
      node_whitelist = None
1923

    
1924
    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1925
                                         _ComputeNics(op, cluster, None,
1926
                                                      self.cfg, ec_id),
1927
                                         _ComputeFullBeParams(op, cluster),
1928
                                         node_whitelist)
1929
             for op in self.op.instances]
1930

    
1931
    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1932
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1933

    
1934
    ial.Run(self.op.iallocator)
1935

    
1936
    if not ial.success:
1937
      raise errors.OpPrereqError("Can't compute nodes using"
1938
                                 " iallocator '%s': %s" %
1939
                                 (self.op.iallocator, ial.info),
1940
                                 errors.ECODE_NORES)
1941

    
1942
    self.ia_result = ial.result
1943

    
1944
    if self.op.dry_run:
1945
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1946
        constants.JOB_IDS_KEY: [],
1947
        })
1948

    
1949
  def _ConstructPartialResult(self):
1950
    """Contructs the partial result.
1951

1952
    """
1953
    (allocatable, failed) = self.ia_result
1954
    return {
1955
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1956
        map(compat.fst, allocatable),
1957
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1958
      }
1959

    
1960
  def Exec(self, feedback_fn):
1961
    """Executes the opcode.
1962

1963
    """
1964
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
1965
    (allocatable, failed) = self.ia_result
1966

    
1967
    jobs = []
1968
    for (name, node_names) in allocatable:
1969
      op = op2inst.pop(name)
1970

    
1971
      (op.pnode_uuid, op.pnode) = \
1972
        ExpandNodeUuidAndName(self.cfg, None, node_names[0])
1973
      if len(node_names) > 1:
1974
        (op.snode_uuid, op.snode) = \
1975
          ExpandNodeUuidAndName(self.cfg, None, node_names[1])
1976

    
1977
      jobs.append([op])
1978

    
1979
    missing = set(op2inst.keys()) - set(failed)
1980
    assert not missing, \
1981
      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
1982

    
1983
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
1984

    
1985

    
1986
class _InstNicModPrivate:
1987
  """Data structure for network interface modifications.
1988

1989
  Used by L{LUInstanceSetParams}.
1990

1991
  """
1992
  def __init__(self):
1993
    self.params = None
1994
    self.filled = None
1995

    
1996

    
1997
def _PrepareContainerMods(mods, private_fn):
1998
  """Prepares a list of container modifications by adding a private data field.
1999

2000
  @type mods: list of tuples; (operation, index, parameters)
2001
  @param mods: List of modifications
2002
  @type private_fn: callable or None
2003
  @param private_fn: Callable for constructing a private data field for a
2004
    modification
2005
  @rtype: list
2006

2007
  """
2008
  if private_fn is None:
2009
    fn = lambda: None
2010
  else:
2011
    fn = private_fn
2012

    
2013
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2014

    
2015

    
2016
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2017
  """Checks if nodes have enough physical CPUs
2018

2019
  This function checks if all given nodes have the needed number of
2020
  physical CPUs. In case any node has less CPUs or we cannot get the
2021
  information from the node, this function raises an OpPrereqError
2022
  exception.
2023

2024
  @type lu: C{LogicalUnit}
2025
  @param lu: a logical unit from which we get configuration data
2026
  @type node_uuids: C{list}
2027
  @param node_uuids: the list of node UUIDs to check
2028
  @type requested: C{int}
2029
  @param requested: the minimum acceptable number of physical CPUs
2030
  @type hypervisor_specs: list of pairs (string, dict of strings)
2031
  @param hypervisor_specs: list of hypervisor specifications in
2032
      pairs (hypervisor_name, hvparams)
2033
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2034
      or we cannot check the node
2035

2036
  """
2037
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs, None)
2038
  for node_uuid in node_uuids:
2039
    info = nodeinfo[node_uuid]
2040
    node_name = lu.cfg.GetNodeName(node_uuid)
2041
    info.Raise("Cannot get current information from node %s" % node_name,
2042
               prereq=True, ecode=errors.ECODE_ENVIRON)
2043
    (_, _, (hv_info, )) = info.payload
2044
    num_cpus = hv_info.get("cpu_total", None)
2045
    if not isinstance(num_cpus, int):
2046
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2047
                                 " on node %s, result was '%s'" %
2048
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2049
    if requested > num_cpus:
2050
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2051
                                 "required" % (node_name, num_cpus, requested),
2052
                                 errors.ECODE_NORES)
2053

    
2054

    
2055
def GetItemFromContainer(identifier, kind, container):
2056
  """Return the item refered by the identifier.
2057

2058
  @type identifier: string
2059
  @param identifier: Item index or name or UUID
2060
  @type kind: string
2061
  @param kind: One-word item description
2062
  @type container: list
2063
  @param container: Container to get the item from
2064

2065
  """
2066
  # Index
2067
  try:
2068
    idx = int(identifier)
2069
    if idx == -1:
2070
      # Append
2071
      absidx = len(container) - 1
2072
    elif idx < 0:
2073
      raise IndexError("Not accepting negative indices other than -1")
2074
    elif idx > len(container):
2075
      raise IndexError("Got %s index %s, but there are only %s" %
2076
                       (kind, idx, len(container)))
2077
    else:
2078
      absidx = idx
2079
    return (absidx, container[idx])
2080
  except ValueError:
2081
    pass
2082

    
2083
  for idx, item in enumerate(container):
2084
    if item.uuid == identifier or item.name == identifier:
2085
      return (idx, item)
2086

    
2087
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2088
                             (kind, identifier), errors.ECODE_NOENT)
2089

    
2090

    
2091
def _ApplyContainerMods(kind, container, chgdesc, mods,
2092
                        create_fn, modify_fn, remove_fn):
2093
  """Applies descriptions in C{mods} to C{container}.
2094

2095
  @type kind: string
2096
  @param kind: One-word item description
2097
  @type container: list
2098
  @param container: Container to modify
2099
  @type chgdesc: None or list
2100
  @param chgdesc: List of applied changes
2101
  @type mods: list
2102
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2103
  @type create_fn: callable
2104
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2105
    receives absolute item index, parameters and private data object as added
2106
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2107
    as list
2108
  @type modify_fn: callable
2109
  @param modify_fn: Callback for modifying an existing item
2110
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2111
    and private data object as added by L{_PrepareContainerMods}, returns
2112
    changes as list
2113
  @type remove_fn: callable
2114
  @param remove_fn: Callback on removing item; receives absolute item index,
2115
    item and private data object as added by L{_PrepareContainerMods}
2116

2117
  """
2118
  for (op, identifier, params, private) in mods:
2119
    changes = None
2120

    
2121
    if op == constants.DDM_ADD:
2122
      # Calculate where item will be added
2123
      # When adding an item, identifier can only be an index
2124
      try:
2125
        idx = int(identifier)
2126
      except ValueError:
2127
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2128
                                   " identifier for %s" % constants.DDM_ADD,
2129
                                   errors.ECODE_INVAL)
2130
      if idx == -1:
2131
        addidx = len(container)
2132
      else:
2133
        if idx < 0:
2134
          raise IndexError("Not accepting negative indices other than -1")
2135
        elif idx > len(container):
2136
          raise IndexError("Got %s index %s, but there are only %s" %
2137
                           (kind, idx, len(container)))
2138
        addidx = idx
2139

    
2140
      if create_fn is None:
2141
        item = params
2142
      else:
2143
        (item, changes) = create_fn(addidx, params, private)
2144

    
2145
      if idx == -1:
2146
        container.append(item)
2147
      else:
2148
        assert idx >= 0
2149
        assert idx <= len(container)
2150
        # list.insert does so before the specified index
2151
        container.insert(idx, item)
2152
    else:
2153
      # Retrieve existing item
2154
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2155

    
2156
      if op == constants.DDM_REMOVE:
2157
        assert not params
2158

    
2159
        if remove_fn is not None:
2160
          remove_fn(absidx, item, private)
2161

    
2162
        changes = [("%s/%s" % (kind, absidx), "remove")]
2163

    
2164
        assert container[absidx] == item
2165
        del container[absidx]
2166
      elif op == constants.DDM_MODIFY:
2167
        if modify_fn is not None:
2168
          changes = modify_fn(absidx, item, params, private)
2169
      else:
2170
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2171

    
2172
    assert _TApplyContModsCbChanges(changes)
2173

    
2174
    if not (chgdesc is None or changes is None):
2175
      chgdesc.extend(changes)
2176

    
2177

    
2178
def _UpdateIvNames(base_index, disks):
2179
  """Updates the C{iv_name} attribute of disks.
2180

2181
  @type disks: list of L{objects.Disk}
2182

2183
  """
2184
  for (idx, disk) in enumerate(disks):
2185
    disk.iv_name = "disk/%s" % (base_index + idx, )
2186

    
2187

    
2188
class LUInstanceSetParams(LogicalUnit):
2189
  """Modifies an instances's parameters.
2190

2191
  """
2192
  HPATH = "instance-modify"
2193
  HTYPE = constants.HTYPE_INSTANCE
2194
  REQ_BGL = False
2195

    
2196
  @staticmethod
2197
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2198
    assert ht.TList(mods)
2199
    assert not mods or len(mods[0]) in (2, 3)
2200

    
2201
    if mods and len(mods[0]) == 2:
2202
      result = []
2203

    
2204
      addremove = 0
2205
      for op, params in mods:
2206
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2207
          result.append((op, -1, params))
2208
          addremove += 1
2209

    
2210
          if addremove > 1:
2211
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2212
                                       " supported at a time" % kind,
2213
                                       errors.ECODE_INVAL)
2214
        else:
2215
          result.append((constants.DDM_MODIFY, op, params))
2216

    
2217
      assert verify_fn(result)
2218
    else:
2219
      result = mods
2220

    
2221
    return result
2222

    
2223
  @staticmethod
2224
  def _CheckMods(kind, mods, key_types, item_fn):
2225
    """Ensures requested disk/NIC modifications are valid.
2226

2227
    """
2228
    for (op, _, params) in mods:
2229
      assert ht.TDict(params)
2230

    
2231
      # If 'key_types' is an empty dict, we assume we have an
2232
      # 'ext' template and thus do not ForceDictType
2233
      if key_types:
2234
        utils.ForceDictType(params, key_types)
2235

    
2236
      if op == constants.DDM_REMOVE:
2237
        if params:
2238
          raise errors.OpPrereqError("No settings should be passed when"
2239
                                     " removing a %s" % kind,
2240
                                     errors.ECODE_INVAL)
2241
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2242
        item_fn(op, params)
2243
      else:
2244
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2245

    
2246
  @staticmethod
2247
  def _VerifyDiskModification(op, params, excl_stor):
2248
    """Verifies a disk modification.
2249

2250
    """
2251
    if op == constants.DDM_ADD:
2252
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2253
      if mode not in constants.DISK_ACCESS_SET:
2254
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2255
                                   errors.ECODE_INVAL)
2256

    
2257
      size = params.get(constants.IDISK_SIZE, None)
2258
      if size is None:
2259
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2260
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2261

    
2262
      try:
2263
        size = int(size)
2264
      except (TypeError, ValueError), err:
2265
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2266
                                   errors.ECODE_INVAL)
2267

    
2268
      params[constants.IDISK_SIZE] = size
2269
      name = params.get(constants.IDISK_NAME, None)
2270
      if name is not None and name.lower() == constants.VALUE_NONE:
2271
        params[constants.IDISK_NAME] = None
2272

    
2273
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2274

    
2275
    elif op == constants.DDM_MODIFY:
2276
      if constants.IDISK_SIZE in params:
2277
        raise errors.OpPrereqError("Disk size change not possible, use"
2278
                                   " grow-disk", errors.ECODE_INVAL)
2279
      if len(params) > 2:
2280
        raise errors.OpPrereqError("Disk modification doesn't support"
2281
                                   " additional arbitrary parameters",
2282
                                   errors.ECODE_INVAL)
2283
      name = params.get(constants.IDISK_NAME, None)
2284
      if name is not None and name.lower() == constants.VALUE_NONE:
2285
        params[constants.IDISK_NAME] = None
2286

    
2287
  @staticmethod
2288
  def _VerifyNicModification(op, params):
2289
    """Verifies a network interface modification.
2290

2291
    """
2292
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2293
      ip = params.get(constants.INIC_IP, None)
2294
      name = params.get(constants.INIC_NAME, None)
2295
      req_net = params.get(constants.INIC_NETWORK, None)
2296
      link = params.get(constants.NIC_LINK, None)
2297
      mode = params.get(constants.NIC_MODE, None)
2298
      if name is not None and name.lower() == constants.VALUE_NONE:
2299
        params[constants.INIC_NAME] = None
2300
      if req_net is not None:
2301
        if req_net.lower() == constants.VALUE_NONE:
2302
          params[constants.INIC_NETWORK] = None
2303
          req_net = None
2304
        elif link is not None or mode is not None:
2305
          raise errors.OpPrereqError("If network is given"
2306
                                     " mode or link should not",
2307
                                     errors.ECODE_INVAL)
2308

    
2309
      if op == constants.DDM_ADD:
2310
        macaddr = params.get(constants.INIC_MAC, None)
2311
        if macaddr is None:
2312
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2313

    
2314
      if ip is not None:
2315
        if ip.lower() == constants.VALUE_NONE:
2316
          params[constants.INIC_IP] = None
2317
        else:
2318
          if ip.lower() == constants.NIC_IP_POOL:
2319
            if op == constants.DDM_ADD and req_net is None:
2320
              raise errors.OpPrereqError("If ip=pool, parameter network"
2321
                                         " cannot be none",
2322
                                         errors.ECODE_INVAL)
2323
          else:
2324
            if not netutils.IPAddress.IsValid(ip):
2325
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2326
                                         errors.ECODE_INVAL)
2327

    
2328
      if constants.INIC_MAC in params:
2329
        macaddr = params[constants.INIC_MAC]
2330
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2331
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2332

    
2333
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2334
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2335
                                     " modifying an existing NIC",
2336
                                     errors.ECODE_INVAL)
2337

    
2338
  def CheckArguments(self):
2339
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2340
            self.op.hvparams or self.op.beparams or self.op.os_name or
2341
            self.op.offline is not None or self.op.runtime_mem or
2342
            self.op.pnode):
2343
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2344

    
2345
    if self.op.hvparams:
2346
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2347
                           "hypervisor", "instance", "cluster")
2348

    
2349
    self.op.disks = self._UpgradeDiskNicMods(
2350
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2351
    self.op.nics = self._UpgradeDiskNicMods(
2352
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2353

    
2354
    if self.op.disks and self.op.disk_template is not None:
2355
      raise errors.OpPrereqError("Disk template conversion and other disk"
2356
                                 " changes not supported at the same time",
2357
                                 errors.ECODE_INVAL)
2358

    
2359
    if (self.op.disk_template and
2360
        self.op.disk_template in constants.DTS_INT_MIRROR and
2361
        self.op.remote_node is None):
2362
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2363
                                 " one requires specifying a secondary node",
2364
                                 errors.ECODE_INVAL)
2365

    
2366
    # Check NIC modifications
2367
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2368
                    self._VerifyNicModification)
2369

    
2370
    if self.op.pnode:
2371
      (self.op.pnode_uuid, self.op.pnode) = \
2372
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2373

    
2374
  def ExpandNames(self):
2375
    self._ExpandAndLockInstance()
2376
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2377
    # Can't even acquire node locks in shared mode as upcoming changes in
2378
    # Ganeti 2.6 will start to modify the node object on disk conversion
2379
    self.needed_locks[locking.LEVEL_NODE] = []
2380
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2381
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2382
    # Look node group to look up the ipolicy
2383
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2384

    
2385
  def DeclareLocks(self, level):
2386
    if level == locking.LEVEL_NODEGROUP:
2387
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2388
      # Acquire locks for the instance's nodegroups optimistically. Needs
2389
      # to be verified in CheckPrereq
2390
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2391
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2392
    elif level == locking.LEVEL_NODE:
2393
      self._LockInstancesNodes()
2394
      if self.op.disk_template and self.op.remote_node:
2395
        (self.op.remote_node_uuid, self.op.remote_node) = \
2396
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2397
                                self.op.remote_node)
2398
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2399
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2400
      # Copy node locks
2401
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2402
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2403

    
2404
  def BuildHooksEnv(self):
2405
    """Build hooks env.
2406

2407
    This runs on the master, primary and secondaries.
2408

2409
    """
2410
    args = {}
2411
    if constants.BE_MINMEM in self.be_new:
2412
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2413
    if constants.BE_MAXMEM in self.be_new:
2414
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2415
    if constants.BE_VCPUS in self.be_new:
2416
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2417
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2418
    # information at all.
2419

    
2420
    if self._new_nics is not None:
2421
      nics = []
2422

    
2423
      for nic in self._new_nics:
2424
        n = copy.deepcopy(nic)
2425
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2426
        n.nicparams = nicparams
2427
        nics.append(NICToTuple(self, n))
2428

    
2429
      args["nics"] = nics
2430

    
2431
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2432
    if self.op.disk_template:
2433
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2434
    if self.op.runtime_mem:
2435
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2436

    
2437
    return env
2438

    
2439
  def BuildHooksNodes(self):
2440
    """Build hooks nodes.
2441

2442
    """
2443
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2444
    return (nl, nl)
2445

    
2446
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2447
                              old_params, cluster, pnode_uuid):
2448

    
2449
    update_params_dict = dict([(key, params[key])
2450
                               for key in constants.NICS_PARAMETERS
2451
                               if key in params])
2452

    
2453
    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

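  # Note: only the template pairs listed in _DISK_CONVERSIONS at the end of
  # this class (currently plain <-> drbd8) are supported; any other
  # combination is rejected below with ECODE_INVAL.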
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

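    # Each entry of self.diskmod is an (op, idx, params, private) tuple as
    # produced by _PrepareContainerMods, with op being one of the
    # constants.DDM_* values; the checks below iterate over these entries.
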
    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested modifications against the current state of
    the instance and computes the new parameter values.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

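    # The *_proposed attributes set below always hold the fully filled
    # parameter values (cluster defaults applied), whether or not the user
    # changed anything; the consistency checks further down use them.
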
    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs, False)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

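    # Memory ballooning (runtime_mem) only makes sense for a running instance;
    # unless --force is given the requested value must fall within the
    # proposed MINMEM/MAXMEM range, and any increase must fit into the free
    # memory of the primary node.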
    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

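  # The two helpers below implement the entries of _DISK_CONVERSIONS; both
  # expect the instance to be down (enforced in _PreCheckDiskTemplate) and
  # update the configuration once the new disk layout is in place.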
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
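    """Issues a single hotplug request on the instance's primary node.

    The hotplug_device RPC is called with the given action (e.g. "ADD" or
    "REMOVE"), device type ("DISK" or "NIC"), device object, extra payload
    and sequence number; any failure reported by the node is raised.

    """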
      self.LogInfo("Trying to hotplug device...")
3165
      result = self.rpc.call_hotplug_device(self.instance.primary_node,
3166
                                            self.instance, action, dev_type,
3167
                                            device, extra, seq)
3168
      result.Raise("Could not hotplug device.")
3169
      self.LogInfo("Hotplug done.")
3170

    
3171
  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    if self.op.hotplug:
      _, device_info = AssembleInstanceDisks(self, self.instance,
                                             [disk], check=False)
      _, _, dev_path = device_info[0]
      self._HotplugDevice("ADD", "DISK", disk, dev_path, idx)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    if self.op.hotplug and _DeviceHotplugable(root):
      self._HotplugDevice("REMOVE", "DISK", root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    if self.op.hotplug:
      self._HotplugDevice("ADD", "NIC", nobj, None, idx)

    desc = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    return (nobj, desc)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

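    # A modified NIC is re-plugged: the existing device is hot-removed and
    # then hot-added again with the updated parameters.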
    if self.op.hotplug and _DeviceHotplugable(nic):
      self._HotplugDevice("REMOVE", "NIC", nic, None, idx)
      self._HotplugDevice("ADD", "NIC", nic, None, idx)

    return changes

  def _RemoveNic(self, idx, nic, _):
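    """Removes a network interface, hot-unplugging it first if requested.

    """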
    if self.op.hotplug and _DeviceHotplugable(nic):
      self._HotplugDevice("REMOVE", "NIC", nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

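      # The disks must be shut down before converting the template; the
      # actual conversion is delegated to the handler registered in
      # _DISK_CONVERSIONS, and on failure any DRBD minors reserved for the
      # instance are released again.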
      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
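    """Computes and submits the jobs needed to move the instance.

    The configured iallocator is asked for a group-change solution and the
    resulting jobs are returned wrapped in a L{ResultWithJobs}.

    """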
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)