root / lib / cmdlib / instance.py @ d0d7d7cf

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
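
# Note (illustrative, not part of the upstream file): values matching the
# type description above are lists of 2-element (name, change) tuples, for
# example [("disk/0", "add"), ("nic.0", "remove")]; the exact descriptor
# strings are whatever the _ApplyContainerMods callbacks choose to return.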


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
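
# Illustrative behaviour (not part of the upstream file): if "web1" resolves
# to "web1.example.com", the prefix check passes and the resolved hostname
# object is returned; if it resolved to, say, "mail.example.com", an
# OpPrereqError would be raised instead.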


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)
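
# Illustrative calling pattern (assumption, mirroring _RunAllocator below):
#   req = _CreateInstanceAllocRequest(op, disks, nics, be_full, node_names)
#   ial = iallocator.IAllocator(cfg, rpc_runner, req)
#   ial.Run(op.iallocator)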


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
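
# Illustrative example (not part of the upstream file): if op.beparams is
# {constants.BE_VCPUS: constants.VALUE_AUTO, constants.BE_MAXMEM: 512}, the
# "auto" entry is replaced by the cluster default for that parameter before
# the dict is type-checked and merged via cluster.SimpleFillBE().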


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
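
# Illustrative inputs (not part of the upstream file): op.nics may contain
# entries such as {"mode": "bridged", "link": "br0", "mac": "auto"} or
# {"network": "vlan100", "ip": "pool"}; for the latter, IP assignment from
# the network's pool is deferred until the primary node is known (see
# LUInstanceCreate.CheckPrereq below).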


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
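
# Illustrative shape (not part of the upstream file): instance_spec uses the
# constants.ISPEC_* keys, e.g.
#   {constants.ISPEC_MEM_SIZE: 512, constants.ISPEC_CPU_COUNT: 2,
#    constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [1024],
#    constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}
# mirroring the ispec dict built in LUInstanceCreate.CheckPrereq below.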


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
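
# Illustrative example (not part of the upstream file): for an OS that
# declares supported_variants, the user must pass a variant-qualified name
# such as "debootstrap+default"; plain "debootstrap" raises OpPrereqError,
# as does a variant that is not listed in supported_variants.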


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      self.cfg.GetNodeNames(node_whitelist))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
              d[constants.IDISK_MODE]) for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if self.op.src_path in exp_list[node].payload:
          found = True
          self.op.src_node = node
          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 self.op.instance_name, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name, os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, self.pnode.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)
1313

    
1314
          feedback_fn("* running the instance OS create scripts...")
1315
          # FIXME: pass debug option from opcode to backend
1316
          os_add_result = \
1317
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1318
                                          self.op.debug_level)
1319
          if pause_sync:
1320
            feedback_fn("* resuming disk sync")
1321
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1322
                                                              (iobj.disks,
1323
                                                               iobj), False)
1324
            for idx, success in enumerate(result.payload):
1325
              if not success:
1326
                logging.warn("resume-sync of instance %s for disk %d failed",
1327
                             self.op.instance_name, idx)
1328

    
1329
          os_add_result.Raise("Could not add os for instance %s"
1330
                              " on node %s" % (self.op.instance_name,
1331
                                               self.pnode.name))
1332

    
1333
      else:
1334
        if self.op.mode == constants.INSTANCE_IMPORT:
1335
          feedback_fn("* running the instance OS import scripts...")
1336

    
1337
          transfers = []
1338

    
1339
          for idx, image in enumerate(self.src_images):
1340
            if not image:
1341
              continue
1342

    
1343
            # FIXME: pass debug option from opcode to backend
1344
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1345
                                               constants.IEIO_FILE, (image, ),
1346
                                               constants.IEIO_SCRIPT,
1347
                                               (iobj.disks[idx], idx),
1348
                                               None)
1349
            transfers.append(dt)
1350

    
1351
          import_result = \
1352
            masterd.instance.TransferInstanceData(self, feedback_fn,
1353
                                                  self.op.src_node_uuid,
1354
                                                  self.pnode.uuid,
1355
                                                  self.pnode.secondary_ip,
1356
                                                  iobj, transfers)
1357
          if not compat.all(import_result):
1358
            self.LogWarning("Some disks for instance %s on node %s were not"
1359
                            " imported successfully" % (self.op.instance_name,
1360
                                                        self.pnode.name))
1361

    
1362
          rename_from = self._old_instance_name
1363

    
1364
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1365
          feedback_fn("* preparing remote import...")
1366
          # The source cluster will stop the instance before attempting to make
1367
          # a connection. In some cases stopping an instance can take a long
1368
          # time, hence the shutdown timeout is added to the connection
1369
          # timeout.
1370
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1371
                             self.op.source_shutdown_timeout)
1372
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1373

    
1374
          assert iobj.primary_node == self.pnode.uuid
1375
          disk_results = \
1376
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1377
                                          self.source_x509_ca,
1378
                                          self._cds, timeouts)
1379
          if not compat.all(disk_results):
1380
            # TODO: Should the instance still be started, even if some disks
1381
            # failed to import (valid for local imports, too)?
1382
            self.LogWarning("Some disks for instance %s on node %s were not"
1383
                            " imported successfully" % (self.op.instance_name,
1384
                                                        self.pnode.name))
1385

    
1386
          rename_from = self.source_instance_name
1387

    
1388
        else:
1389
          # also checked in the prereq part
1390
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1391
                                       % self.op.mode)
1392

    
1393
        # Run rename script on newly imported instance
1394
        assert iobj.name == self.op.instance_name
1395
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1396
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1397
                                                   rename_from,
1398
                                                   self.op.debug_level)
1399
        result.Warn("Failed to run rename script for %s on node %s" %
1400
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1401

    
1402
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1403

    
1404
    if self.op.start:
1405
      iobj.admin_state = constants.ADMINST_UP
1406
      self.cfg.Update(iobj, feedback_fn)
1407
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1408
                   self.pnode.name)
1409
      feedback_fn("* starting instance...")
1410
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1411
                                            False, self.op.reason)
1412
      result.Raise("Could not start instance")
1413

    
1414
    return list(iobj.all_nodes)
1415

    
1416

    
1417
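# Illustrative only: LUInstanceCreate is normally reached through the
# OpInstanceCreate opcode, typically submitted from the command line, for
# example (exact flags depend on the installed Ganeti version):
#
#   gnt-instance add -t drbd -o debootstrap+default -s 10G \
#     -n node1.example.com:node2.example.com instance1.example.com
#
# Exec() above then generates the disks, adds the instance to the
# configuration, installs the OS and, if requested, starts the instance.
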
class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        self.cfg.SetDiskID(disk, node_uuid)
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name

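# Illustrative only: a rename is usually requested as
#
#   gnt-instance rename instance1.example.com instance2.example.com
#
# CheckPrereq above refuses to rename a running instance, a name that is
# already in the cluster, or (when the IP check is enabled) a name whose IP
# address is already in use.
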
class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)

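# Illustrative only: removal is usually requested as
#
#   gnt-instance remove instance1.example.com
#
# which shuts the instance down with the configured shutdown timeout and then
# calls RemoveInstance() to drop its disks and configuration entry.
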
class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.name)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(self.instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(
                 target_node.uuid, (disk, self.instance), self.instance.name,
                 True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
                                                                self.instance),
                                             target_node.name, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))

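# Illustrative only: a data-copy move is usually requested as
#
#   gnt-instance move -n node2.example.com instance1.example.com
#
# Only simple (LVM- or file-based) disk layouts can be moved this way, as
# enforced by the DTS_COPYABLE and dev_type checks in CheckPrereq above.
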
class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator and has_nodes:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    cluster = self.cfg.GetClusterInfo()
    default_vg = self.cfg.GetVGName()
    ec_id = self.proc.GetECId()

    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = self.cfg.GetNodeNames(
                         list(self.owned_locks(locking.LEVEL_NODE)))
    else:
      node_whitelist = None

    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                         _ComputeNics(op, cluster, None,
                                                      self.cfg, ec_id),
                                         _ComputeFullBeParams(op, cluster),
                                         node_whitelist)
             for op in self.op.instances]

    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    (allocatable, failed) = self.ia_result
    return {
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
        map(compat.fst, allocatable),
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
    (allocatable, failed) = self.ia_result

    jobs = []
    for (name, node_names) in allocatable:
      op = op2inst.pop(name)

      (op.pnode_uuid, op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, None, node_names[0])
      if len(node_names) > 1:
        (op.snode_uuid, op.snode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[1])

      jobs.append([op])

    missing = set(op2inst.keys()) - set(failed)
    assert not missing, \
      "Iallocator returned an incomplete result: %s" % utils.CommaJoin(missing)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]

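# A minimal sketch of what _PrepareContainerMods does to its input (values
# are illustrative): a list such as
#   [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
# becomes
#   [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}, <private>)]
# where the fourth element is whatever private_fn() returned (None when no
# private_fn is given).
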
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs, None)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)

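# A minimal usage sketch for GetItemFromContainer (illustrative): with a list
# of NIC objects named "nics",
#   GetItemFromContainer("0", "nic", nics)    -> (0, nics[0])
#   GetItemFromContainer("-1", "nic", nics)   -> (len(nics) - 1, nics[-1])
#   GetItemFromContainer("eth0", "nic", nics) -> first item whose name or
#                                                UUID equals "eth0"
# and an OpPrereqError is raised when nothing matches.
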
def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )

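# A minimal sketch of how the two helpers above are combined (callback names
# are placeholders, values illustrative):
#
#   chgdesc = []
#   mods = _PrepareContainerMods(
#       [(constants.DDM_MODIFY, "0", {constants.IDISK_NAME: "data"})], None)
#   _ApplyContainerMods("disk", instance.disks, chgdesc, mods,
#                       None, modify_fn, None)
#
# Each callback's returned change list is accumulated into chgdesc.
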
class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  @staticmethod
  def _VerifyDiskModification(op, params, excl_stor):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)

      try:
        size = int(size)
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
                                   errors.ECODE_INVAL)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

      CheckSpindlesExclusiveStorage(params, excl_stor, True)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)
      if len(params) > 2:
        raise errors.OpPrereqError("Disk modification doesn't support"
                                   " additional arbitrary parameters",
                                   errors.ECODE_INVAL)
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If network is given, mode or link"
                                     " should not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

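  # Illustrative only: _UpgradeDiskNicMods above accepts both the legacy
  # 2-tuple format, e.g. [(constants.DDM_ADD, {constants.IDISK_SIZE: 1024})],
  # and the current 3-tuple format, e.g.
  # [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]; legacy entries
  # are rewritten into the 3-tuple form before verification.
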
  def CheckArguments(self):
2325
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2326
            self.op.hvparams or self.op.beparams or self.op.os_name or
2327
            self.op.offline is not None or self.op.runtime_mem or
2328
            self.op.pnode):
2329
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2330

    
2331
    if self.op.hvparams:
2332
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2333
                           "hypervisor", "instance", "cluster")
2334

    
2335
    self.op.disks = self._UpgradeDiskNicMods(
2336
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2337
    self.op.nics = self._UpgradeDiskNicMods(
2338
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2339

    
2340
    if self.op.disks and self.op.disk_template is not None:
2341
      raise errors.OpPrereqError("Disk template conversion and other disk"
2342
                                 " changes not supported at the same time",
2343
                                 errors.ECODE_INVAL)
2344

    
2345
    if (self.op.disk_template and
2346
        self.op.disk_template in constants.DTS_INT_MIRROR and
2347
        self.op.remote_node is None):
2348
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2349
                                 " one requires specifying a secondary node",
2350
                                 errors.ECODE_INVAL)
2351

    
2352
    # Check NIC modifications
2353
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2354
                    self._VerifyNicModification)
2355

    
2356
    if self.op.pnode:
2357
      (self.op.pnode_uuid, self.op.pnode) = \
2358
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2359

    
2360
  def ExpandNames(self):
2361
    self._ExpandAndLockInstance()
2362
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2363
    # Can't even acquire node locks in shared mode as upcoming changes in
2364
    # Ganeti 2.6 will start to modify the node object on disk conversion
2365
    self.needed_locks[locking.LEVEL_NODE] = []
2366
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2367
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2368
    # Look node group to look up the ipolicy
2369
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2370

    
2371
  def DeclareLocks(self, level):
2372
    if level == locking.LEVEL_NODEGROUP:
2373
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2374
      # Acquire locks for the instance's nodegroups optimistically. Needs
2375
      # to be verified in CheckPrereq
2376
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2377
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2378
    elif level == locking.LEVEL_NODE:
2379
      self._LockInstancesNodes()
2380
      if self.op.disk_template and self.op.remote_node:
2381
        (self.op.remote_node_uuid, self.op.remote_node) = \
2382
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2383
                                self.op.remote_node)
2384
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2385
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2386
      # Copy node locks
2387
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2388
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2389

    
2390
  def BuildHooksEnv(self):
2391
    """Build hooks env.
2392

2393
    This runs on the master, primary and secondaries.
2394

2395
    """
2396
    args = {}
2397
    if constants.BE_MINMEM in self.be_new:
2398
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2399
    if constants.BE_MAXMEM in self.be_new:
2400
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2401
    if constants.BE_VCPUS in self.be_new:
2402
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2403
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2404
    # information at all.
2405

    
2406
    if self._new_nics is not None:
2407
      nics = []
2408

    
2409
      for nic in self._new_nics:
2410
        n = copy.deepcopy(nic)
2411
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2412
        n.nicparams = nicparams
2413
        nics.append(NICToTuple(self, n))
2414

    
2415
      args["nics"] = nics
2416

    
2417
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2418
    if self.op.disk_template:
2419
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2420
    if self.op.runtime_mem:
2421
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2422

    
2423
    return env
2424

    
2425
  def BuildHooksNodes(self):
2426
    """Build hooks nodes.
2427

2428
    """
2429
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2430
    return (nl, nl)
2431

    
2432
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2433
                              old_params, cluster, pnode_uuid):
2434

    
2435
    update_params_dict = dict([(key, params[key])
2436
                               for key in constants.NICS_PARAMETERS
2437
                               if key in params])
2438

    
2439
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2440
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2441

    
2442
    new_net_uuid = None
2443
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2444
    if new_net_uuid_or_name:
2445
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2446
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2447

    
2448
    if old_net_uuid:
2449
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

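    # Keep both the raw parameter dict and the cluster-default-filled version
    # on the per-modification private structure: the filled dict is the one
    # validated above and later stored on the NIC object (see _CreateNewNic
    # and _ApplyNicMods).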
    private.params = new_params
    private.filled = new_filled_params

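  # Changing the disk template is only supported for the template pairs listed
  # in _DISK_CONVERSIONS below (currently plain <-> drbd); the instance must be
  # stopped, and converting to DRBD additionally requires a remote node to act
  # as the new secondary.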
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested parameter changes against the current state of
    the instance and the cluster.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs, False)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor, self.instance.hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB unless --force is given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

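  # The conversion helpers below are reached from Exec() via _DISK_CONVERSIONS
  # once the instance's disks have been shut down.  Going from plain to drbd
  # roughly means: create the new meta/data LVs, rename the existing LVs so
  # they become the DRBD data devices, create the DRBD devices on top, update
  # the configuration and wait for the initial resync.  A typical trigger would
  # be something like "gnt-instance modify -t drbd -n <new-secondary> <name>"
  # (illustrative invocation; the CLI layer, not this module, parses it).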
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.name, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.name, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

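  # Exec() applies the changes to the in-memory instance object and commits
  # them with a single cfg.Update() at the end; a disk template conversion is
  # the exception, as the conversion helpers above update the configuration
  # themselves before the new disks start syncing.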
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

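  # Dispatch table mapping (current template, requested template) to the bound
  # conversion method; Exec() looks the pair up after shutting down the
  # instance's disks.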
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
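  """Change the node group of an instance.

  """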
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)