
lib/cmdlib/instance.py @ 7a1a3efd


1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with instances."""
23

    
24
import OpenSSL
25
import copy
26
import logging
27
import os
28

    
29
from ganeti import compat
30
from ganeti import constants
31
from ganeti import errors
32
from ganeti import ht
33
from ganeti import hypervisor
34
from ganeti import locking
35
from ganeti.masterd import iallocator
36
from ganeti import masterd
37
from ganeti import netutils
38
from ganeti import objects
39
from ganeti import opcodes
40
from ganeti import pathutils
41
from ganeti import rpc
42
from ganeti import utils
43

    
44
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45

    
46
from ganeti.cmdlib.common import INSTANCE_DOWN, \
47
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
53
from ganeti.cmdlib.instance_storage import CreateDisks, \
54
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
56
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
58
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
59
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
60
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
61
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
62
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
63
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
64

    
65
import ganeti.masterd.instance
66

    
67

    
68
#: Type description for changes as returned by L{_ApplyContainerMods}'s
69
#: callbacks
70
_TApplyContModsCbChanges = \
71
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
72
    ht.TNonEmptyString,
73
    ht.TAny,
74
    ])))
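# A matching value is None or a list of 2-element tuples whose first item is a
# non-empty string naming what changed and whose second item may be any value;
# a hypothetical example: [("mode", "bridged"), ("link", "br0")].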
75

    
76

    
77
def _CheckHostnameSane(lu, name):
78
  """Ensures that a given hostname resolves to a 'sane' name.
79

80
  The given name is required to be a prefix of the resolved hostname,
81
  to prevent accidental mismatches.
82

83
  @param lu: the logical unit on behalf of which we're checking
84
  @param name: the name we should resolve and check
85
  @return: the resolved hostname object
86

87
  """
88
  hostname = netutils.GetHostname(name=name)
89
  if hostname.name != name:
90
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
91
  if not utils.MatchNameComponent(name, [hostname.name]):
92
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
93
                                " same as given hostname '%s'") %
94
                               (hostname.name, name), errors.ECODE_INVAL)
95
  return hostname
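# Usage sketch (hedged, mirroring LUInstanceCreate.CheckArguments below):
#   self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
#   self.op.instance_name = self.hostname1.name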
96

    
97

    
98
def _CheckOpportunisticLocking(op):
99
  """Generate error if opportunistic locking is not possible.
100

101
  """
102
  if op.opportunistic_locking and not op.iallocator:
103
    raise errors.OpPrereqError("Opportunistic locking is only available in"
104
                               " combination with an instance allocator",
105
                               errors.ECODE_INVAL)
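# Usage sketch (hedged): callers simply pass the opcode, as done in
# LUInstanceCreate.CheckArguments below:
#   _CheckOpportunisticLocking(self.op)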
106

    
107

    
108
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
109
  """Wrapper around IAReqInstanceAlloc.
110

111
  @param op: The instance opcode
112
  @param disks: The computed disks
113
  @param nics: The computed nics
114
  @param beparams: The fully filled beparams
115
  @param node_whitelist: List of nodes which should appear as online to the
116
    allocator (unless the node is already marked offline)
117

118
  @returns: A filled L{iallocator.IAReqInstanceAlloc}
119

120
  """
121
  spindle_use = beparams[constants.BE_SPINDLE_USE]
122
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
123
                                       disk_template=op.disk_template,
124
                                       tags=op.tags,
125
                                       os=op.os_type,
126
                                       vcpus=beparams[constants.BE_VCPUS],
127
                                       memory=beparams[constants.BE_MAXMEM],
128
                                       spindle_use=spindle_use,
129
                                       disks=disks,
130
                                       nics=[n.ToDict() for n in nics],
131
                                       hypervisor=op.hypervisor,
132
                                       node_whitelist=node_whitelist)
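# Usage sketch (hedged, see LUInstanceCreate._RunAllocator below): the request
# is handed to an IAllocator instance and run against the configured script:
#   req = _CreateInstanceAllocRequest(self.op, self.disks, self.nics,
#                                     self.be_full, node_whitelist)
#   ial = iallocator.IAllocator(self.cfg, self.rpc, req)
#   ial.Run(self.op.iallocator)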
133

    
134

    
135
def _ComputeFullBeParams(op, cluster):
136
  """Computes the full beparams.
137

138
  @param op: The instance opcode
139
  @param cluster: The cluster config object
140

141
  @return: The fully filled beparams
142

143
  """
144
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
145
  for param, value in op.beparams.iteritems():
146
    if value == constants.VALUE_AUTO:
147
      op.beparams[param] = default_beparams[param]
148
  objects.UpgradeBeParams(op.beparams)
149
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
150
  return cluster.SimpleFillBE(op.beparams)
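# Usage sketch (hedged, see LUInstanceCreate.CheckPrereq below):
#   self.be_full = _ComputeFullBeParams(self.op, cluster)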
151

    
152

    
153
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
154
  """Computes the nics.
155

156
  @param op: The instance opcode
157
  @param cluster: Cluster configuration object
158
  @param default_ip: The default ip to assign
159
  @param cfg: An instance of the configuration object
160
  @param ec_id: Execution context ID
161

162
  @returns: The built up NICs
163

164
  """
165
  nics = []
166
  for nic in op.nics:
167
    nic_mode_req = nic.get(constants.INIC_MODE, None)
168
    nic_mode = nic_mode_req
169
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
170
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
171

    
172
    net = nic.get(constants.INIC_NETWORK, None)
173
    link = nic.get(constants.NIC_LINK, None)
174
    ip = nic.get(constants.INIC_IP, None)
175

    
176
    if net is None or net.lower() == constants.VALUE_NONE:
177
      net = None
178
    else:
179
      if nic_mode_req is not None or link is not None:
180
        raise errors.OpPrereqError("If network is given, no mode or link"
181
                                   " is allowed to be passed",
182
                                   errors.ECODE_INVAL)
183

    
184
    # ip validity checks
185
    if ip is None or ip.lower() == constants.VALUE_NONE:
186
      nic_ip = None
187
    elif ip.lower() == constants.VALUE_AUTO:
188
      if not op.name_check:
189
        raise errors.OpPrereqError("IP address set to auto but name checks"
190
                                   " have been skipped",
191
                                   errors.ECODE_INVAL)
192
      nic_ip = default_ip
193
    else:
194
      # We defer pool operations until later, so that the iallocator has
195
      # filled in the instance's node(s)
196
      if ip.lower() == constants.NIC_IP_POOL:
197
        if net is None:
198
          raise errors.OpPrereqError("if ip=pool, parameter network"
199
                                     " must be passed too",
200
                                     errors.ECODE_INVAL)
201

    
202
      elif not netutils.IPAddress.IsValid(ip):
203
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
204
                                   errors.ECODE_INVAL)
205

    
206
      nic_ip = ip
207

    
208
    # TODO: check the ip address for uniqueness
209
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
210
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
211
                                 errors.ECODE_INVAL)
212

    
213
    # MAC address verification
214
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
215
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
216
      mac = utils.NormalizeAndValidateMac(mac)
217

    
218
      try:
219
        # TODO: We need to factor this out
220
        cfg.ReserveMAC(mac, ec_id)
221
      except errors.ReservationError:
222
        raise errors.OpPrereqError("MAC address %s already in use"
223
                                   " in cluster" % mac,
224
                                   errors.ECODE_NOTUNIQUE)
225

    
226
    #  Build nic parameters
227
    nicparams = {}
228
    if nic_mode_req:
229
      nicparams[constants.NIC_MODE] = nic_mode
230
    if link:
231
      nicparams[constants.NIC_LINK] = link
232

    
233
    check_params = cluster.SimpleFillNIC(nicparams)
234
    objects.NIC.CheckParameterSyntax(check_params)
235
    net_uuid = cfg.LookupNetwork(net)
236
    name = nic.get(constants.INIC_NAME, None)
237
    if name is not None and name.lower() == constants.VALUE_NONE:
238
      name = None
239
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
240
                          network=net_uuid, nicparams=nicparams)
241
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
242
    nics.append(nic_obj)
243

    
244
  return nics
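# Usage sketch (hedged, see LUInstanceCreate.CheckPrereq below):
#   self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
#                            self.proc.GetECId())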
245

    
246

    
247
def _CheckForConflictingIp(lu, ip, node):
248
  """In case of conflicting IP address raise error.
249

250
  @type ip: string
251
  @param ip: IP address
252
  @type node: string
253
  @param node: node name
254

255
  """
256
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
257
  if conf_net is not None:
258
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
259
                                " network %s, but the target NIC does not." %
260
                                (ip, conf_net)),
261
                               errors.ECODE_STATE)
262

    
263
  return (None, None)
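# Usage sketch (hedged): invoked for NICs without a network when conflict
# checking is requested, as in LUInstanceCreate.CheckPrereq below:
#   _CheckForConflictingIp(self, nic.ip, self.pnode.name)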
264

    
265

    
266
def _ComputeIPolicyInstanceSpecViolation(
267
  ipolicy, instance_spec, disk_template,
268
  _compute_fn=ComputeIPolicySpecViolation):
269
  """Compute if instance specs meets the specs of ipolicy.
270

271
  @type ipolicy: dict
272
  @param ipolicy: The ipolicy to verify against
273
  @type instance_spec: dict
274
  @param instance_spec: The instance spec to verify
275
  @type disk_template: string
276
  @param disk_template: the disk template of the instance
277
  @param _compute_fn: The function to verify ipolicy (unittest only)
278
  @see: L{ComputeIPolicySpecViolation}
279

280
  """
281
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
282
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
283
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
284
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
285
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
286
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
287

    
288
  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
289
                     disk_sizes, spindle_use, disk_template)
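# Usage sketch (hedged, see LUInstanceCreate.CheckPrereq below): an ispec dict
# is built from the instance's beparams/disks/nics and checked against the
# node group's instance policy:
#   res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
#                                              self.op.disk_template)
#   if not self.op.ignore_ipolicy and res:
#     raise errors.OpPrereqError(...)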
290

    
291

    
292
def _CheckOSVariant(os_obj, name):
293
  """Check whether an OS name conforms to the os variants specification.
294

295
  @type os_obj: L{objects.OS}
296
  @param os_obj: OS object to check
297
  @type name: string
298
  @param name: OS name passed by the user, to check for validity
299

300
  """
301
  variant = objects.OS.GetVariant(name)
302
  if not os_obj.supported_variants:
303
    if variant:
304
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
305
                                 " passed)" % (os_obj.name, variant),
306
                                 errors.ECODE_INVAL)
307
    return
308
  if not variant:
309
    raise errors.OpPrereqError("OS name must include a variant",
310
                               errors.ECODE_INVAL)
311

    
312
  if variant not in os_obj.supported_variants:
313
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
314

    
315

    
316
class LUInstanceCreate(LogicalUnit):
317
  """Create an instance.
318

319
  """
320
  HPATH = "instance-add"
321
  HTYPE = constants.HTYPE_INSTANCE
322
  REQ_BGL = False
323

    
324
  def CheckArguments(self):
325
    """Check arguments.
326

327
    """
328
    # do not require name_check to ease forward/backward compatibility
329
    # for tools
330
    if self.op.no_install and self.op.start:
331
      self.LogInfo("No-installation mode selected, disabling startup")
332
      self.op.start = False
333
    # validate/normalize the instance name
334
    self.op.instance_name = \
335
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
336

    
337
    if self.op.ip_check and not self.op.name_check:
338
      # TODO: make the ip check more flexible and not depend on the name check
339
      raise errors.OpPrereqError("Cannot do IP address check without a name"
340
                                 " check", errors.ECODE_INVAL)
341

    
342
    # check nics' parameter names
343
    for nic in self.op.nics:
344
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
345
    # check that NIC parameter names are unique and valid
346
    utils.ValidateDeviceNames("NIC", self.op.nics)
347

    
348
    # check that disk names are unique and valid
349
    utils.ValidateDeviceNames("disk", self.op.disks)
350

    
351
    cluster = self.cfg.GetClusterInfo()
352
    if not self.op.disk_template in cluster.enabled_disk_templates:
353
      raise errors.OpPrereqError("Cannot create an instance with disk template"
354
                                 " '%s', because it is not enabled in the"
355
                                 " cluster. Enabled disk templates are: %s." %
356
                                 (self.op.disk_template,
357
                                  ",".join(cluster.enabled_disk_templates)))
358

    
359
    # check disks. parameter names and consistent adopt/no-adopt strategy
360
    has_adopt = has_no_adopt = False
361
    for disk in self.op.disks:
362
      if self.op.disk_template != constants.DT_EXT:
363
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
364
      if constants.IDISK_ADOPT in disk:
365
        has_adopt = True
366
      else:
367
        has_no_adopt = True
368
    if has_adopt and has_no_adopt:
369
      raise errors.OpPrereqError("Either all disks are adopted or none is",
370
                                 errors.ECODE_INVAL)
371
    if has_adopt:
372
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
373
        raise errors.OpPrereqError("Disk adoption is not supported for the"
374
                                   " '%s' disk template" %
375
                                   self.op.disk_template,
376
                                   errors.ECODE_INVAL)
377
      if self.op.iallocator is not None:
378
        raise errors.OpPrereqError("Disk adoption not allowed with an"
379
                                   " iallocator script", errors.ECODE_INVAL)
380
      if self.op.mode == constants.INSTANCE_IMPORT:
381
        raise errors.OpPrereqError("Disk adoption not allowed for"
382
                                   " instance import", errors.ECODE_INVAL)
383
    else:
384
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
385
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
386
                                   " but no 'adopt' parameter given" %
387
                                   self.op.disk_template,
388
                                   errors.ECODE_INVAL)
389

    
390
    self.adopt_disks = has_adopt
391

    
392
    # instance name verification
393
    if self.op.name_check:
394
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
395
      self.op.instance_name = self.hostname1.name
396
      # used in CheckPrereq for ip ping check
397
      self.check_ip = self.hostname1.ip
398
    else:
399
      self.check_ip = None
400

    
401
    # file storage checks
402
    if (self.op.file_driver and
403
        not self.op.file_driver in constants.FILE_DRIVER):
404
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
405
                                 self.op.file_driver, errors.ECODE_INVAL)
406

    
407
    # set default file_driver if unset and required
408
    if (not self.op.file_driver and
409
        self.op.disk_template in [constants.DT_FILE,
410
                                  constants.DT_SHARED_FILE]):
411
      self.op.file_driver = constants.FD_LOOP
412

    
413
    if self.op.disk_template == constants.DT_FILE:
414
      opcodes.RequireFileStorage()
415
    elif self.op.disk_template == constants.DT_SHARED_FILE:
416
      opcodes.RequireSharedFileStorage()
417

    
418
    ### Node/iallocator related checks
419
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
420

    
421
    if self.op.pnode is not None:
422
      if self.op.disk_template in constants.DTS_INT_MIRROR:
423
        if self.op.snode is None:
424
          raise errors.OpPrereqError("The networked disk templates need"
425
                                     " a mirror node", errors.ECODE_INVAL)
426
      elif self.op.snode:
427
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
428
                        " template")
429
        self.op.snode = None
430

    
431
    _CheckOpportunisticLocking(self.op)
432

    
433
    self._cds = GetClusterDomainSecret()
434

    
435
    if self.op.mode == constants.INSTANCE_IMPORT:
436
      # On import force_variant must be True, because if we forced it at
437
      # initial install, our only chance when importing it back is that it
438
      # works again!
439
      self.op.force_variant = True
440

    
441
      if self.op.no_install:
442
        self.LogInfo("No-installation mode has no effect during import")
443

    
444
    elif self.op.mode == constants.INSTANCE_CREATE:
445
      if self.op.os_type is None:
446
        raise errors.OpPrereqError("No guest OS specified",
447
                                   errors.ECODE_INVAL)
448
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
449
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
450
                                   " installation" % self.op.os_type,
451
                                   errors.ECODE_STATE)
452
      if self.op.disk_template is None:
453
        raise errors.OpPrereqError("No disk template specified",
454
                                   errors.ECODE_INVAL)
455

    
456
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
457
      # Check handshake to ensure both clusters have the same domain secret
458
      src_handshake = self.op.source_handshake
459
      if not src_handshake:
460
        raise errors.OpPrereqError("Missing source handshake",
461
                                   errors.ECODE_INVAL)
462

    
463
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
464
                                                           src_handshake)
465
      if errmsg:
466
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
467
                                   errors.ECODE_INVAL)
468

    
469
      # Load and check source CA
470
      self.source_x509_ca_pem = self.op.source_x509_ca
471
      if not self.source_x509_ca_pem:
472
        raise errors.OpPrereqError("Missing source X509 CA",
473
                                   errors.ECODE_INVAL)
474

    
475
      try:
476
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
477
                                                    self._cds)
478
      except OpenSSL.crypto.Error, err:
479
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
480
                                   (err, ), errors.ECODE_INVAL)
481

    
482
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
483
      if errcode is not None:
484
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
485
                                   errors.ECODE_INVAL)
486

    
487
      self.source_x509_ca = cert
488

    
489
      src_instance_name = self.op.source_instance_name
490
      if not src_instance_name:
491
        raise errors.OpPrereqError("Missing source instance name",
492
                                   errors.ECODE_INVAL)
493

    
494
      self.source_instance_name = \
495
        netutils.GetHostname(name=src_instance_name).name
496

    
497
    else:
498
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
499
                                 self.op.mode, errors.ECODE_INVAL)
500

    
501
  def ExpandNames(self):
502
    """ExpandNames for CreateInstance.
503

504
    Figure out the right locks for instance creation.
505

506
    """
507
    self.needed_locks = {}
508

    
509
    instance_name = self.op.instance_name
510
    # this is just a preventive check, but someone might still add this
511
    # instance in the meantime, and creation will fail at lock-add time
512
    if instance_name in self.cfg.GetInstanceList():
513
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
514
                                 instance_name, errors.ECODE_EXISTS)
515

    
516
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
517

    
518
    if self.op.iallocator:
519
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
520
      # specifying a group on instance creation and then selecting nodes from
521
      # that group
522
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
523
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
524

    
525
      if self.op.opportunistic_locking:
526
        self.opportunistic_locks[locking.LEVEL_NODE] = True
527
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
528
    else:
529
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
530
      nodelist = [self.op.pnode]
531
      if self.op.snode is not None:
532
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
533
        nodelist.append(self.op.snode)
534
      self.needed_locks[locking.LEVEL_NODE] = nodelist
535

    
536
    # in case of import lock the source node too
537
    if self.op.mode == constants.INSTANCE_IMPORT:
538
      src_node = self.op.src_node
539
      src_path = self.op.src_path
540

    
541
      if src_path is None:
542
        self.op.src_path = src_path = self.op.instance_name
543

    
544
      if src_node is None:
545
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
546
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
547
        self.op.src_node = None
548
        if os.path.isabs(src_path):
549
          raise errors.OpPrereqError("Importing an instance from a path"
550
                                     " requires a source node option",
551
                                     errors.ECODE_INVAL)
552
      else:
553
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
554
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
555
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
556
        if not os.path.isabs(src_path):
557
          self.op.src_path = src_path = \
558
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
559

    
560
    self.needed_locks[locking.LEVEL_NODE_RES] = \
561
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
562

    
563
  def _RunAllocator(self):
564
    """Run the allocator based on input opcode.
565

566
    """
567
    if self.op.opportunistic_locking:
568
      # Only consider nodes for which a lock is held
569
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
570
    else:
571
      node_whitelist = None
572

    
573
    #TODO Export network to iallocator so that it chooses a pnode
574
    #     in a nodegroup that has the desired network connected to
575
    req = _CreateInstanceAllocRequest(self.op, self.disks,
576
                                      self.nics, self.be_full,
577
                                      node_whitelist)
578
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
579

    
580
    ial.Run(self.op.iallocator)
581

    
582
    if not ial.success:
583
      # When opportunistic locks are used only a temporary failure is generated
584
      if self.op.opportunistic_locking:
585
        ecode = errors.ECODE_TEMP_NORES
586
      else:
587
        ecode = errors.ECODE_NORES
588

    
589
      raise errors.OpPrereqError("Can't compute nodes using"
590
                                 " iallocator '%s': %s" %
591
                                 (self.op.iallocator, ial.info),
592
                                 ecode)
593

    
594
    self.op.pnode = ial.result[0]
595
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
596
                 self.op.instance_name, self.op.iallocator,
597
                 utils.CommaJoin(ial.result))
598

    
599
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
600

    
601
    if req.RequiredNodes() == 2:
602
      self.op.snode = ial.result[1]
603

    
604
  def BuildHooksEnv(self):
605
    """Build hooks env.
606

607
    This runs on master, primary and secondary nodes of the instance.
608

609
    """
610
    env = {
611
      "ADD_MODE": self.op.mode,
612
      }
613
    if self.op.mode == constants.INSTANCE_IMPORT:
614
      env["SRC_NODE"] = self.op.src_node
615
      env["SRC_PATH"] = self.op.src_path
616
      env["SRC_IMAGES"] = self.src_images
617

    
618
    env.update(BuildInstanceHookEnv(
619
      name=self.op.instance_name,
620
      primary_node=self.op.pnode,
621
      secondary_nodes=self.secondaries,
622
      status=self.op.start,
623
      os_type=self.op.os_type,
624
      minmem=self.be_full[constants.BE_MINMEM],
625
      maxmem=self.be_full[constants.BE_MAXMEM],
626
      vcpus=self.be_full[constants.BE_VCPUS],
627
      nics=NICListToTuple(self, self.nics),
628
      disk_template=self.op.disk_template,
629
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
630
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
631
             for d in self.disks],
632
      bep=self.be_full,
633
      hvp=self.hv_full,
634
      hypervisor_name=self.op.hypervisor,
635
      tags=self.op.tags,
636
      ))
637

    
638
    return env
639

    
640
  def BuildHooksNodes(self):
641
    """Build hooks nodes.
642

643
    """
644
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
645
    return nl, nl
646

    
647
  def _ReadExportInfo(self):
648
    """Reads the export information from disk.
649

650
    It will override the opcode source node and path with the actual
651
    information, if these two were not specified before.
652

653
    @return: the export information
654

655
    """
656
    assert self.op.mode == constants.INSTANCE_IMPORT
657

    
658
    src_node = self.op.src_node
659
    src_path = self.op.src_path
660

    
661
    if src_node is None:
662
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
663
      exp_list = self.rpc.call_export_list(locked_nodes)
664
      found = False
665
      for node in exp_list:
666
        if exp_list[node].fail_msg:
667
          continue
668
        if src_path in exp_list[node].payload:
669
          found = True
670
          self.op.src_node = src_node = node
671
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
672
                                                       src_path)
673
          break
674
      if not found:
675
        raise errors.OpPrereqError("No export found for relative path %s" %
676
                                   src_path, errors.ECODE_INVAL)
677

    
678
    CheckNodeOnline(self, src_node)
679
    result = self.rpc.call_export_info(src_node, src_path)
680
    result.Raise("No export or invalid export found in dir %s" % src_path)
681

    
682
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
683
    if not export_info.has_section(constants.INISECT_EXP):
684
      raise errors.ProgrammerError("Corrupted export config",
685
                                   errors.ECODE_ENVIRON)
686

    
687
    ei_version = export_info.get(constants.INISECT_EXP, "version")
688
    if (int(ei_version) != constants.EXPORT_VERSION):
689
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
690
                                 (ei_version, constants.EXPORT_VERSION),
691
                                 errors.ECODE_ENVIRON)
692
    return export_info
693

    
694
  def _ReadExportParams(self, einfo):
695
    """Use export parameters as defaults.
696

697
    If the opcode doesn't specify (i.e. override) some instance
698
    parameters, try to take them from the export information, if
699
    that declares them.
700

701
    """
702
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
703

    
704
    if self.op.disk_template is None:
705
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
706
        self.op.disk_template = einfo.get(constants.INISECT_INS,
707
                                          "disk_template")
708
        if self.op.disk_template not in constants.DISK_TEMPLATES:
709
          raise errors.OpPrereqError("Disk template specified in configuration"
710
                                     " file is not one of the allowed values:"
711
                                     " %s" %
712
                                     " ".join(constants.DISK_TEMPLATES),
713
                                     errors.ECODE_INVAL)
714
      else:
715
        raise errors.OpPrereqError("No disk template specified and the export"
716
                                   " is missing the disk_template information",
717
                                   errors.ECODE_INVAL)
718

    
719
    if not self.op.disks:
720
      disks = []
721
      # TODO: import the disk iv_name too
722
      for idx in range(constants.MAX_DISKS):
723
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
724
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
725
          disks.append({constants.IDISK_SIZE: disk_sz})
726
      self.op.disks = disks
727
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
728
        raise errors.OpPrereqError("No disk info specified and the export"
729
                                   " is missing the disk information",
730
                                   errors.ECODE_INVAL)
731

    
732
    if not self.op.nics:
733
      nics = []
734
      for idx in range(constants.MAX_NICS):
735
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
736
          ndict = {}
737
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
738
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
739
            ndict[name] = v
740
          nics.append(ndict)
741
        else:
742
          break
743
      self.op.nics = nics
744

    
745
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
746
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
747

    
748
    if (self.op.hypervisor is None and
749
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
750
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
751

    
752
    if einfo.has_section(constants.INISECT_HYP):
753
      # use the export parameters but do not override the ones
754
      # specified by the user
755
      for name, value in einfo.items(constants.INISECT_HYP):
756
        if name not in self.op.hvparams:
757
          self.op.hvparams[name] = value
758

    
759
    if einfo.has_section(constants.INISECT_BEP):
760
      # use the parameters, without overriding
761
      for name, value in einfo.items(constants.INISECT_BEP):
762
        if name not in self.op.beparams:
763
          self.op.beparams[name] = value
764
        # Compatibility for the old "memory" be param
765
        if name == constants.BE_MEMORY:
766
          if constants.BE_MAXMEM not in self.op.beparams:
767
            self.op.beparams[constants.BE_MAXMEM] = value
768
          if constants.BE_MINMEM not in self.op.beparams:
769
            self.op.beparams[constants.BE_MINMEM] = value
770
    else:
771
      # try to read the parameters old style, from the main section
772
      for name in constants.BES_PARAMETERS:
773
        if (name not in self.op.beparams and
774
            einfo.has_option(constants.INISECT_INS, name)):
775
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
776

    
777
    if einfo.has_section(constants.INISECT_OSP):
778
      # use the parameters, without overriding
779
      for name, value in einfo.items(constants.INISECT_OSP):
780
        if name not in self.op.osparams:
781
          self.op.osparams[name] = value
782

    
783
  def _RevertToDefaults(self, cluster):
784
    """Revert the instance parameters to the default values.
785

786
    """
787
    # hvparams
788
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
789
    for name in self.op.hvparams.keys():
790
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
791
        del self.op.hvparams[name]
792
    # beparams
793
    be_defs = cluster.SimpleFillBE({})
794
    for name in self.op.beparams.keys():
795
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
796
        del self.op.beparams[name]
797
    # nic params
798
    nic_defs = cluster.SimpleFillNIC({})
799
    for nic in self.op.nics:
800
      for name in constants.NICS_PARAMETERS:
801
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
802
          del nic[name]
803
    # osparams
804
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
805
    for name in self.op.osparams.keys():
806
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
807
        del self.op.osparams[name]
808

    
809
  def _CalculateFileStorageDir(self):
810
    """Calculate final instance file storage dir.
811

812
    """
813
    # file storage dir calculation/check
814
    self.instance_file_storage_dir = None
815
    if self.op.disk_template in constants.DTS_FILEBASED:
816
      # build the full file storage dir path
817
      joinargs = []
818

    
819
      if self.op.disk_template == constants.DT_SHARED_FILE:
820
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
821
      else:
822
        get_fsd_fn = self.cfg.GetFileStorageDir
823

    
824
      cfg_storagedir = get_fsd_fn()
825
      if not cfg_storagedir:
826
        raise errors.OpPrereqError("Cluster file storage dir not defined",
827
                                   errors.ECODE_STATE)
828
      joinargs.append(cfg_storagedir)
829

    
830
      if self.op.file_storage_dir is not None:
831
        joinargs.append(self.op.file_storage_dir)
832

    
833
      joinargs.append(self.op.instance_name)
834

    
835
      # pylint: disable=W0142
836
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
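      # The resulting directory is therefore
      # <cluster file storage dir>[/<user-supplied dir>]/<instance name>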
837

    
838
  def CheckPrereq(self): # pylint: disable=R0914
839
    """Check prerequisites.
840

841
    """
842
    self._CalculateFileStorageDir()
843

    
844
    if self.op.mode == constants.INSTANCE_IMPORT:
845
      export_info = self._ReadExportInfo()
846
      self._ReadExportParams(export_info)
847
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
848
    else:
849
      self._old_instance_name = None
850

    
851
    if (not self.cfg.GetVGName() and
852
        self.op.disk_template not in constants.DTS_NOT_LVM):
853
      raise errors.OpPrereqError("Cluster does not support lvm-based"
854
                                 " instances", errors.ECODE_STATE)
855

    
856
    if (self.op.hypervisor is None or
857
        self.op.hypervisor == constants.VALUE_AUTO):
858
      self.op.hypervisor = self.cfg.GetHypervisorType()
859

    
860
    cluster = self.cfg.GetClusterInfo()
861
    enabled_hvs = cluster.enabled_hypervisors
862
    if self.op.hypervisor not in enabled_hvs:
863
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
864
                                 " cluster (%s)" %
865
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
866
                                 errors.ECODE_STATE)
867

    
868
    # Check tag validity
869
    for tag in self.op.tags:
870
      objects.TaggableObject.ValidateTag(tag)
871

    
872
    # check hypervisor parameter syntax (locally)
873
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
874
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
875
                                      self.op.hvparams)
876
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
877
    hv_type.CheckParameterSyntax(filled_hvp)
878
    self.hv_full = filled_hvp
879
    # check that we don't specify global parameters on an instance
880
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
881
                         "instance", "cluster")
882

    
883
    # fill and remember the beparams dict
884
    self.be_full = _ComputeFullBeParams(self.op, cluster)
885

    
886
    # build os parameters
887
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
888

    
889
    # now that hvp/bep are in final format, let's reset to defaults,
890
    # if told to do so
891
    if self.op.identify_defaults:
892
      self._RevertToDefaults(cluster)
893

    
894
    # NIC buildup
895
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
896
                             self.proc.GetECId())
897

    
898
    # disk checks/pre-build
899
    default_vg = self.cfg.GetVGName()
900
    self.disks = ComputeDisks(self.op, default_vg)
901

    
902
    if self.op.mode == constants.INSTANCE_IMPORT:
903
      disk_images = []
904
      for idx in range(len(self.disks)):
905
        option = "disk%d_dump" % idx
906
        if export_info.has_option(constants.INISECT_INS, option):
907
          # FIXME: are the old os-es, disk sizes, etc. useful?
908
          export_name = export_info.get(constants.INISECT_INS, option)
909
          image = utils.PathJoin(self.op.src_path, export_name)
910
          disk_images.append(image)
911
        else:
912
          disk_images.append(False)
913

    
914
      self.src_images = disk_images
915

    
916
      if self.op.instance_name == self._old_instance_name:
917
        for idx, nic in enumerate(self.nics):
918
          if nic.mac == constants.VALUE_AUTO:
919
            nic_mac_ini = "nic%d_mac" % idx
920
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
921

    
922
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
923

    
924
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
925
    if self.op.ip_check:
926
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
927
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
928
                                   (self.check_ip, self.op.instance_name),
929
                                   errors.ECODE_NOTUNIQUE)
930

    
931
    #### mac address generation
932
    # By generating here the mac address both the allocator and the hooks get
933
    # the real final mac address rather than the 'auto' or 'generate' value.
934
    # There is a race condition between the generation and the instance object
935
    # creation, which means that we know the mac is valid now, but we're not
936
    # sure it will be when we actually add the instance. If things go bad
937
    # adding the instance will abort because of a duplicate mac, and the
938
    # creation job will fail.
939
    for nic in self.nics:
940
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
941
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
942

    
943
    #### allocator run
944

    
945
    if self.op.iallocator is not None:
946
      self._RunAllocator()
947

    
948
    # Release all unneeded node locks
949
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
950
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
951
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
952
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
953

    
954
    assert (self.owned_locks(locking.LEVEL_NODE) ==
955
            self.owned_locks(locking.LEVEL_NODE_RES)), \
956
      "Node locks differ from node resource locks"
957

    
958
    #### node related checks
959

    
960
    # check primary node
961
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
962
    assert self.pnode is not None, \
963
      "Cannot retrieve locked node %s" % self.op.pnode
964
    if pnode.offline:
965
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
966
                                 pnode.name, errors.ECODE_STATE)
967
    if pnode.drained:
968
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
969
                                 pnode.name, errors.ECODE_STATE)
970
    if not pnode.vm_capable:
971
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
972
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
973

    
974
    self.secondaries = []
975

    
976
    # Fill in any IPs from IP pools. This must happen here, because we need to
977
    # know the nic's primary node, as specified by the iallocator
978
    for idx, nic in enumerate(self.nics):
979
      net_uuid = nic.network
980
      if net_uuid is not None:
981
        nobj = self.cfg.GetNetwork(net_uuid)
982
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
983
        if netparams is None:
984
          raise errors.OpPrereqError("No netparams found for network"
985
                                     " %s. Propably not connected to"
986
                                     " node's %s nodegroup" %
987
                                     (nobj.name, self.pnode.name),
988
                                     errors.ECODE_INVAL)
989
        self.LogInfo("NIC/%d inherits netparams %s" %
990
                     (idx, netparams.values()))
991
        nic.nicparams = dict(netparams)
992
        if nic.ip is not None:
993
          if nic.ip.lower() == constants.NIC_IP_POOL:
994
            try:
995
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
996
            except errors.ReservationError:
997
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
998
                                         " from the address pool" % idx,
999
                                         errors.ECODE_STATE)
1000
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1001
          else:
1002
            try:
1003
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
1004
                                 check=self.op.conflicts_check)
1005
            except errors.ReservationError:
1006
              raise errors.OpPrereqError("IP address %s already in use"
1007
                                         " or does not belong to network %s" %
1008
                                         (nic.ip, nobj.name),
1009
                                         errors.ECODE_NOTUNIQUE)
1010

    
1011
      # net is None, ip None or given
1012
      elif self.op.conflicts_check:
1013
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1014

    
1015
    # mirror node verification
1016
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1017
      if self.op.snode == pnode.name:
1018
        raise errors.OpPrereqError("The secondary node cannot be the"
1019
                                   " primary node", errors.ECODE_INVAL)
1020
      CheckNodeOnline(self, self.op.snode)
1021
      CheckNodeNotDrained(self, self.op.snode)
1022
      CheckNodeVmCapable(self, self.op.snode)
1023
      self.secondaries.append(self.op.snode)
1024

    
1025
      snode = self.cfg.GetNodeInfo(self.op.snode)
1026
      if pnode.group != snode.group:
1027
        self.LogWarning("The primary and secondary nodes are in two"
1028
                        " different node groups; the disk parameters"
1029
                        " from the first disk's node group will be"
1030
                        " used")
1031

    
1032
    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1033
      nodes = [pnode]
1034
      if self.op.disk_template in constants.DTS_INT_MIRROR:
1035
        nodes.append(snode)
1036
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1037
      if compat.any(map(has_es, nodes)):
1038
        raise errors.OpPrereqError("Disk template %s not supported with"
1039
                                   " exclusive storage" % self.op.disk_template,
1040
                                   errors.ECODE_STATE)
1041

    
1042
    nodenames = [pnode.name] + self.secondaries
1043

    
1044
    if not self.adopt_disks:
1045
      if self.op.disk_template == constants.DT_RBD:
1046
        # _CheckRADOSFreeSpace() is just a placeholder.
1047
        # Any function that checks prerequisites can be placed here.
1048
        # Check if there is enough space on the RADOS cluster.
1049
        CheckRADOSFreeSpace()
1050
      elif self.op.disk_template == constants.DT_EXT:
1051
        # FIXME: Function that checks prereqs if needed
1052
        pass
1053
      else:
1054
        # Check lv size requirements, if not adopting
1055
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1056
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1057

    
1058
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1059
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1060
                                disk[constants.IDISK_ADOPT])
1061
                     for disk in self.disks])
1062
      if len(all_lvs) != len(self.disks):
1063
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1064
                                   errors.ECODE_INVAL)
1065
      for lv_name in all_lvs:
1066
        try:
1067
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1068
          # to ReserveLV uses the same syntax
1069
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1070
        except errors.ReservationError:
1071
          raise errors.OpPrereqError("LV named %s used by another instance" %
1072
                                     lv_name, errors.ECODE_NOTUNIQUE)
1073

    
1074
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1075
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1076

    
1077
      node_lvs = self.rpc.call_lv_list([pnode.name],
1078
                                       vg_names.payload.keys())[pnode.name]
1079
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1080
      node_lvs = node_lvs.payload
1081

    
1082
      delta = all_lvs.difference(node_lvs.keys())
1083
      if delta:
1084
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1085
                                   utils.CommaJoin(delta),
1086
                                   errors.ECODE_INVAL)
1087
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1088
      if online_lvs:
1089
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1090
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1091
                                   errors.ECODE_STATE)
1092
      # update the size of disk based on what is found
1093
      for dsk in self.disks:
1094
        dsk[constants.IDISK_SIZE] = \
1095
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1096
                                        dsk[constants.IDISK_ADOPT])][0]))
1097

    
1098
    elif self.op.disk_template == constants.DT_BLOCK:
1099
      # Normalize and de-duplicate device paths
1100
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1101
                       for disk in self.disks])
1102
      if len(all_disks) != len(self.disks):
1103
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1104
                                   errors.ECODE_INVAL)
1105
      baddisks = [d for d in all_disks
1106
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1107
      if baddisks:
1108
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1109
                                   " cannot be adopted" %
1110
                                   (utils.CommaJoin(baddisks),
1111
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1112
                                   errors.ECODE_INVAL)
1113

    
1114
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
1115
                                            list(all_disks))[pnode.name]
1116
      node_disks.Raise("Cannot get block device information from node %s" %
1117
                       pnode.name)
1118
      node_disks = node_disks.payload
1119
      delta = all_disks.difference(node_disks.keys())
1120
      if delta:
1121
        raise errors.OpPrereqError("Missing block device(s): %s" %
1122
                                   utils.CommaJoin(delta),
1123
                                   errors.ECODE_INVAL)
1124
      for dsk in self.disks:
1125
        dsk[constants.IDISK_SIZE] = \
1126
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1127

    
1128
    # Verify instance specs
1129
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1130
    ispec = {
1131
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1132
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1133
      constants.ISPEC_DISK_COUNT: len(self.disks),
1134
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1135
                                  for disk in self.disks],
1136
      constants.ISPEC_NIC_COUNT: len(self.nics),
1137
      constants.ISPEC_SPINDLE_USE: spindle_use,
1138
      }
1139

    
1140
    group_info = self.cfg.GetNodeGroup(pnode.group)
1141
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1142
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1143
                                               self.op.disk_template)
1144
    if not self.op.ignore_ipolicy and res:
1145
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1146
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1147
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1148

    
1149
    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1150

    
1151
    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1152
    # check OS parameters (remotely)
1153
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1154

    
1155
    CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1156

    
1157
    #TODO: _CheckExtParams (remotely)
1158
    # Check parameters for extstorage
1159

    
1160
    # memory check on primary node
1161
    #TODO(dynmem): use MINMEM for checking
1162
    if self.op.start:
1163
      CheckNodeFreeMemory(self, self.pnode.name,
1164
                          "creating instance %s" % self.op.instance_name,
1165
                          self.be_full[constants.BE_MAXMEM],
1166
                          self.op.hypervisor)
1167

    
1168
    self.dry_run_result = list(nodenames)
1169

    
1170
  def Exec(self, feedback_fn):
1171
    """Create and add the instance to the cluster.
1172

1173
    """
1174
    instance = self.op.instance_name
1175
    pnode_name = self.pnode.name
1176

    
1177
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1178
                self.owned_locks(locking.LEVEL_NODE)), \
1179
      "Node locks differ from node resource locks"
1180
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1181

    
1182
    ht_kind = self.op.hypervisor
1183
    if ht_kind in constants.HTS_REQ_PORT:
1184
      network_port = self.cfg.AllocatePort()
1185
    else:
1186
      network_port = None
1187

    
1188
    # This is ugly, but we have a chicken-and-egg problem here
1189
    # We can only take the group disk parameters, as the instance
1190
    # has no disks yet (we are generating them right here).
1191
    node = self.cfg.GetNodeInfo(pnode_name)
1192
    nodegroup = self.cfg.GetNodeGroup(node.group)
1193
    disks = GenerateDiskTemplate(self,
1194
                                 self.op.disk_template,
1195
                                 instance, pnode_name,
1196
                                 self.secondaries,
1197
                                 self.disks,
1198
                                 self.instance_file_storage_dir,
1199
                                 self.op.file_driver,
1200
                                 0,
1201
                                 feedback_fn,
1202
                                 self.cfg.GetGroupDiskParams(nodegroup))
1203

    
1204
    iobj = objects.Instance(name=instance, os=self.op.os_type,
1205
                            primary_node=pnode_name,
1206
                            nics=self.nics, disks=disks,
1207
                            disk_template=self.op.disk_template,
1208
                            disks_active=False,
1209
                            admin_state=constants.ADMINST_DOWN,
1210
                            network_port=network_port,
1211
                            beparams=self.op.beparams,
1212
                            hvparams=self.op.hvparams,
1213
                            hypervisor=self.op.hypervisor,
1214
                            osparams=self.op.osparams,
1215
                            )
1216

    
1217
    if self.op.tags:
1218
      for tag in self.op.tags:
1219
        iobj.AddTag(tag)
1220

    
1221
    if self.adopt_disks:
1222
      if self.op.disk_template == constants.DT_PLAIN:
1223
        # rename LVs to the newly-generated names; we need to construct
1224
        # 'fake' LV disks with the old data, plus the new unique_id
1225
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1226
        rename_to = []
1227
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1228
          rename_to.append(t_dsk.logical_id)
1229
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1230
          self.cfg.SetDiskID(t_dsk, pnode_name)
1231
        result = self.rpc.call_blockdev_rename(pnode_name,
1232
                                               zip(tmp_disks, rename_to))
1233
        result.Raise("Failed to rename adoped LVs")
1234
    else:
1235
      feedback_fn("* creating instance disks...")
1236
      try:
1237
        CreateDisks(self, iobj)
1238
      except errors.OpExecError:
1239
        self.LogWarning("Device creation failed")
1240
        self.cfg.ReleaseDRBDMinors(instance)
1241
        raise
1242

    
1243
    feedback_fn("adding instance %s to cluster config" % instance)
1244

    
1245
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1246

    
1247
    # Declare that we don't want to remove the instance lock anymore, as we've
1248
    # added the instance to the config
1249
    del self.remove_locks[locking.LEVEL_INSTANCE]
1250

    
1251
    if self.op.mode == constants.INSTANCE_IMPORT:
1252
      # Release unused nodes
1253
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1254
    else:
1255
      # Release all nodes
1256
      ReleaseLocks(self, locking.LEVEL_NODE)
1257

    
1258
    disk_abort = False
1259
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1260
      feedback_fn("* wiping instance disks...")
1261
      try:
1262
        WipeDisks(self, iobj)
1263
      except errors.OpExecError, err:
1264
        logging.exception("Wiping disks failed")
1265
        self.LogWarning("Wiping instance disks failed (%s)", err)
1266
        disk_abort = True
1267

    
1268
    if disk_abort:
1269
      # Something is already wrong with the disks, don't do anything else
1270
      pass
1271
    elif self.op.wait_for_sync:
1272
      disk_abort = not WaitForSync(self, iobj)
1273
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1274
      # make sure the disks are not degraded (still sync-ing is ok)
1275
      feedback_fn("* checking mirrors status")
1276
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1277
    else:
1278
      disk_abort = False
1279

    
1280
    if disk_abort:
1281
      RemoveDisks(self, iobj)
1282
      self.cfg.RemoveInstance(iobj.name)
1283
      # Make sure the instance lock gets removed
1284
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1285
      raise errors.OpExecError("There are some degraded disks for"
1286
                               " this instance")
1287

    
1288
    # instance disks are now active
1289
    iobj.disks_active = True
1290

    
1291
    # Release all node resource locks
1292
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1293

    
1294
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1295
      # we need to set the disks ID to the primary node, since the
1296
      # preceding code might or might have not done it, depending on
1297
      # disk template and other options
1298
      for disk in iobj.disks:
1299
        self.cfg.SetDiskID(disk, pnode_name)
1300
      if self.op.mode == constants.INSTANCE_CREATE:
1301
        if not self.op.no_install:
1302
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1303
                        not self.op.wait_for_sync)
1304
          if pause_sync:
1305
            feedback_fn("* pausing disk sync to install instance OS")
1306
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1307
                                                              (iobj.disks,
1308
                                                               iobj), True)
1309
            for idx, success in enumerate(result.payload):
1310
              if not success:
1311
                logging.warn("pause-sync of instance %s for disk %d failed",
1312
                             instance, idx)
1313

    
1314
          feedback_fn("* running the instance OS create scripts...")
1315
          # FIXME: pass debug option from opcode to backend
1316
          os_add_result = \
1317
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1318
                                          self.op.debug_level)
1319
          if pause_sync:
1320
            feedback_fn("* resuming disk sync")
1321
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1322
                                                              (iobj.disks,
1323
                                                               iobj), False)
1324
            for idx, success in enumerate(result.payload):
1325
              if not success:
1326
                logging.warn("resume-sync of instance %s for disk %d failed",
1327
                             instance, idx)
1328

    
1329
          os_add_result.Raise("Could not add os for instance %s"
1330
                              " on node %s" % (instance, pnode_name))
1331

    
1332
      else:
1333
        if self.op.mode == constants.INSTANCE_IMPORT:
1334
          feedback_fn("* running the instance OS import scripts...")
1335

    
1336
          transfers = []
1337

    
1338
          for idx, image in enumerate(self.src_images):
1339
            if not image:
1340
              continue
1341

    
1342
            # FIXME: pass debug option from opcode to backend
1343
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1344
                                               constants.IEIO_FILE, (image, ),
1345
                                               constants.IEIO_SCRIPT,
1346
                                               (iobj.disks[idx], idx),
1347
                                               None)
1348
            transfers.append(dt)
1349

    
1350
          import_result = \
1351
            masterd.instance.TransferInstanceData(self, feedback_fn,
1352
                                                  self.op.src_node, pnode_name,
1353
                                                  self.pnode.secondary_ip,
1354
                                                  iobj, transfers)
1355
          if not compat.all(import_result):
1356
            self.LogWarning("Some disks for instance %s on node %s were not"
1357
                            " imported successfully" % (instance, pnode_name))
1358

    
1359
          rename_from = self._old_instance_name
1360

    
1361
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1362
          feedback_fn("* preparing remote import...")
1363
          # The source cluster will stop the instance before attempting to make
1364
          # a connection. In some cases stopping an instance can take a long
1365
          # time, hence the shutdown timeout is added to the connection
1366
          # timeout.
1367
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1368
                             self.op.source_shutdown_timeout)
1369
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1370

    
1371
          assert iobj.primary_node == self.pnode.name
1372
          disk_results = \
1373
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1374
                                          self.source_x509_ca,
1375
                                          self._cds, timeouts)
1376
          if not compat.all(disk_results):
1377
            # TODO: Should the instance still be started, even if some disks
1378
            # failed to import (valid for local imports, too)?
1379
            self.LogWarning("Some disks for instance %s on node %s were not"
1380
                            " imported successfully" % (instance, pnode_name))
1381

    
1382
          rename_from = self.source_instance_name
1383

    
1384
        else:
1385
          # also checked in the prereq part
1386
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1387
                                       % self.op.mode)
1388

    
1389
        # Run rename script on newly imported instance
1390
        assert iobj.name == instance
1391
        feedback_fn("Running rename script for %s" % instance)
1392
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1393
                                                   rename_from,
1394
                                                   self.op.debug_level)
1395
        if result.fail_msg:
1396
          self.LogWarning("Failed to run rename script for %s on node"
1397
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
1398

    
1399
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1400

    
1401
    if self.op.start:
1402
      iobj.admin_state = constants.ADMINST_UP
1403
      self.cfg.Update(iobj, feedback_fn)
1404
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1405
      feedback_fn("* starting instance...")
1406
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1407
                                            False, self.op.reason)
1408
      result.Raise("Could not start instance")
1409

    
1410
    return list(iobj.all_nodes)
1411

    
1412

    
1413
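# Editor's note on the rename flow below: the configuration entry and the
# instance lock are updated first; file-based storage directories (if any) are
# then renamed and the OS rename script is run on the primary node.  Failures
# after the configuration update are reported as warnings only, since at that
# point the instance has already been renamed inside Ganeti.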
class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)
    # update info on disks
    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name


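# Editor's note on the removal flow below: the instance is shut down on its
# primary node first; with ignore_failures a failed shutdown is downgraded to
# a warning and the disks and the configuration entry are removed anyway.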
class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


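# Editor's note on the move flow below: only "copyable" disk layouts (plain
# LVs and file-based disks) are supported.  The instance is shut down, the new
# disks are created and assembled on the target node, each source block device
# is exported to its counterpart, and only then is the primary node switched
# in the configuration and the old disks removed.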
class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 instance.disk_template, errors.ECODE_STATE)

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node)
    CheckNodeNotDrained(self, target_node)
    CheckNodeVmCapable(self, target_node)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(self, target_node,
                          "failing over instance %s" %
                          instance.name, bep[constants.BE_MAXMEM],
                          instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.LogWarning("Could not shutdown instance %s on node %s."
                        " Proceeding anyway. Please make sure node"
                        " %s is down. Error details: %s",
                        instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path, _ = result.payload
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = AssembleInstanceDisks(self, instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node,
                                            (instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


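# Editor's note on multi-allocation below: either every instance specifies its
# own pnode/snode or none does; in the latter case an iallocator (from the
# opcode or the cluster-wide default) picks the nodes, and one creation job
# per successfully placed instance is submitted.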
class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
        nodeslist.append(inst.pnode)
        if inst.snode is not None:
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
          nodeslist.append(inst.snode)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
      else:
        node_whitelist = None

      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                           _ComputeNics(op, cluster, None,
                                                        self.cfg, ec_id),
                                           _ComputeFullBeParams(op, cluster),
                                           node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, nodes) in allocatable:
        op = op2inst.pop(name)

        if len(nodes) > 1:
          (op.pnode, op.snode) = nodes
        else:
          (op.pnode,) = nodes

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


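# Illustrative sketch (hypothetical values): _PrepareContainerMods turns
# (op, index, params) tuples into 4-tuples that carry a private data object,
# e.g. [(constants.DDM_ADD, -1, {"size": 1024})] becomes
# [(constants.DDM_ADD, -1, {"size": 1024}, private)], where "private" is the
# result of calling private_fn, or None if no private_fn was given.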
def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]


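# Illustrative sketch (hypothetical values): a typical call is
#   _CheckNodesPhysicalCPUs(self, [pnode.name], 8, instance.hypervisor)
# which raises OpPrereqError unless every listed node reports at least 8
# physical CPUs ("cpu_total") via the node_info RPC.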
def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has less CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node, num_cpus, requested),
                                 errors.ECODE_NORES)


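# Illustrative sketch (hypothetical values): the identifier accepted below may
# be a numeric index ("0", or "-1" for the last item) or an item name/UUID:
#   GetItemFromContainer("-1", "disk", instance.disks)
#   GetItemFromContainer("eth0", "nic", instance.nics)
# both return an (absolute_index, item) tuple, or raise OpPrereqError when no
# matching item exists.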
def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)


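# Illustrative sketch (hypothetical callbacks): applying a prepared "add"
# modification to a container and collecting the change descriptions:
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_ADD, -1, params)], None)
#   _ApplyContainerMods("disk", disks, chgdesc, mods,
#                       create_fn, modify_fn, remove_fn)
# where create_fn(addidx, params, private) returns (new_item, changes) and
# modify_fn/remove_fn follow the signatures documented in the docstring below.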
def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


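# Editor's note on LUInstanceSetParams below: disk/NIC modifications are
# accepted both in the legacy two-element form (op, params) and in the newer
# three-element form (op, index-or-name, params); _UpgradeDiskNicMods converts
# the former into the latter and allows at most one add/remove operation per
# submission.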
class LUInstanceSetParams(LogicalUnit):
2176
  """Modifies an instances's parameters.
2177

2178
  """
2179
  HPATH = "instance-modify"
2180
  HTYPE = constants.HTYPE_INSTANCE
2181
  REQ_BGL = False
2182

    
2183
  @staticmethod
2184
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2185
    assert ht.TList(mods)
2186
    assert not mods or len(mods[0]) in (2, 3)
2187

    
2188
    if mods and len(mods[0]) == 2:
2189
      result = []
2190

    
2191
      addremove = 0
2192
      for op, params in mods:
2193
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2194
          result.append((op, -1, params))
2195
          addremove += 1
2196

    
2197
          if addremove > 1:
2198
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2199
                                       " supported at a time" % kind,
2200
                                       errors.ECODE_INVAL)
2201
        else:
2202
          result.append((constants.DDM_MODIFY, op, params))
2203

    
2204
      assert verify_fn(result)
2205
    else:
2206
      result = mods
2207

    
2208
    return result
2209

    
2210
  @staticmethod
2211
  def _CheckMods(kind, mods, key_types, item_fn):
2212
    """Ensures requested disk/NIC modifications are valid.
2213

2214
    """
2215
    for (op, _, params) in mods:
2216
      assert ht.TDict(params)
2217

    
2218
      # If 'key_types' is an empty dict, we assume we have an
2219
      # 'ext' template and thus do not ForceDictType
2220
      if key_types:
2221
        utils.ForceDictType(params, key_types)
2222

    
2223
      if op == constants.DDM_REMOVE:
2224
        if params:
2225
          raise errors.OpPrereqError("No settings should be passed when"
2226
                                     " removing a %s" % kind,
2227
                                     errors.ECODE_INVAL)
2228
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2229
        item_fn(op, params)
2230
      else:
2231
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2232

    
2233
  @staticmethod
2234
  def _VerifyDiskModification(op, params):
2235
    """Verifies a disk modification.
2236

2237
    """
2238
    if op == constants.DDM_ADD:
2239
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2240
      if mode not in constants.DISK_ACCESS_SET:
2241
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2242
                                   errors.ECODE_INVAL)
2243

    
2244
      size = params.get(constants.IDISK_SIZE, None)
2245
      if size is None:
2246
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2247
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2248

    
2249
      try:
2250
        size = int(size)
2251
      except (TypeError, ValueError), err:
2252
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2253
                                   errors.ECODE_INVAL)
2254

    
2255
      params[constants.IDISK_SIZE] = size
2256
      name = params.get(constants.IDISK_NAME, None)
2257
      if name is not None and name.lower() == constants.VALUE_NONE:
2258
        params[constants.IDISK_NAME] = None
2259

    
2260
    elif op == constants.DDM_MODIFY:
2261
      if constants.IDISK_SIZE in params:
2262
        raise errors.OpPrereqError("Disk size change not possible, use"
2263
                                   " grow-disk", errors.ECODE_INVAL)
2264
      if len(params) > 2:
2265
        raise errors.OpPrereqError("Disk modification doesn't support"
2266
                                   " additional arbitrary parameters",
2267
                                   errors.ECODE_INVAL)
2268
      name = params.get(constants.IDISK_NAME, None)
2269
      if name is not None and name.lower() == constants.VALUE_NONE:
2270
        params[constants.IDISK_NAME] = None
2271

    
2272
  @staticmethod
2273
  def _VerifyNicModification(op, params):
2274
    """Verifies a network interface modification.
2275

2276
    """
2277
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2278
      ip = params.get(constants.INIC_IP, None)
2279
      name = params.get(constants.INIC_NAME, None)
2280
      req_net = params.get(constants.INIC_NETWORK, None)
2281
      link = params.get(constants.NIC_LINK, None)
2282
      mode = params.get(constants.NIC_MODE, None)
2283
      if name is not None and name.lower() == constants.VALUE_NONE:
2284
        params[constants.INIC_NAME] = None
2285
      if req_net is not None:
2286
        if req_net.lower() == constants.VALUE_NONE:
2287
          params[constants.INIC_NETWORK] = None
2288
          req_net = None
2289
        elif link is not None or mode is not None:
2290
          raise errors.OpPrereqError("If network is given"
2291
                                     " mode or link should not",
2292
                                     errors.ECODE_INVAL)
2293

    
2294
      if op == constants.DDM_ADD:
2295
        macaddr = params.get(constants.INIC_MAC, None)
2296
        if macaddr is None:
2297
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2298

    
2299
      if ip is not None:
2300
        if ip.lower() == constants.VALUE_NONE:
2301
          params[constants.INIC_IP] = None
2302
        else:
2303
          if ip.lower() == constants.NIC_IP_POOL:
2304
            if op == constants.DDM_ADD and req_net is None:
2305
              raise errors.OpPrereqError("If ip=pool, parameter network"
2306
                                         " cannot be none",
2307
                                         errors.ECODE_INVAL)
2308
          else:
2309
            if not netutils.IPAddress.IsValid(ip):
2310
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2311
                                         errors.ECODE_INVAL)
2312

    
2313
      if constants.INIC_MAC in params:
2314
        macaddr = params[constants.INIC_MAC]
2315
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2316
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2317

    
2318
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2319
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2320
                                     " modifying an existing NIC",
2321
                                     errors.ECODE_INVAL)
2322

    
2323
  def CheckArguments(self):
2324
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2325
            self.op.hvparams or self.op.beparams or self.op.os_name or
2326
            self.op.osparams or self.op.offline is not None or
2327
            self.op.runtime_mem or self.op.pnode):
2328
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2329

    
2330
    if self.op.hvparams:
2331
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2332
                           "hypervisor", "instance", "cluster")
2333

    
2334
    self.op.disks = self._UpgradeDiskNicMods(
2335
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2336
    self.op.nics = self._UpgradeDiskNicMods(
2337
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2338

    
2339
    if self.op.disks and self.op.disk_template is not None:
2340
      raise errors.OpPrereqError("Disk template conversion and other disk"
2341
                                 " changes not supported at the same time",
2342
                                 errors.ECODE_INVAL)
2343

    
2344
    if (self.op.disk_template and
2345
        self.op.disk_template in constants.DTS_INT_MIRROR and
2346
        self.op.remote_node is None):
2347
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2348
                                 " one requires specifying a secondary node",
2349
                                 errors.ECODE_INVAL)
2350

    
2351
    # Check NIC modifications
2352
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2353
                    self._VerifyNicModification)
2354

    
2355
    if self.op.pnode:
2356
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2357

    
2358
  def ExpandNames(self):
2359
    self._ExpandAndLockInstance()
2360
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2361
    # Can't even acquire node locks in shared mode as upcoming changes in
2362
    # Ganeti 2.6 will start to modify the node object on disk conversion
2363
    self.needed_locks[locking.LEVEL_NODE] = []
2364
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2365
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2366
    # Look node group to look up the ipolicy
2367
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2368

    
2369
  def DeclareLocks(self, level):
2370
    if level == locking.LEVEL_NODEGROUP:
2371
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2372
      # Acquire locks for the instance's nodegroups optimistically. Needs
2373
      # to be verified in CheckPrereq
2374
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2375
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2376
    elif level == locking.LEVEL_NODE:
2377
      self._LockInstancesNodes()
2378
      if self.op.disk_template and self.op.remote_node:
2379
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2380
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2381
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2382
      # Copy node locks
2383
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2384
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2385

    
2386
  def BuildHooksEnv(self):
2387
    """Build hooks env.
2388

2389
    This runs on the master, primary and secondaries.
2390

2391
    """
2392
    args = {}
2393
    if constants.BE_MINMEM in self.be_new:
2394
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2395
    if constants.BE_MAXMEM in self.be_new:
2396
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2397
    if constants.BE_VCPUS in self.be_new:
2398
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2399
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2400
    # information at all.
2401

    
2402
    if self._new_nics is not None:
2403
      nics = []
2404

    
2405
      for nic in self._new_nics:
2406
        n = copy.deepcopy(nic)
2407
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2408
        n.nicparams = nicparams
2409
        nics.append(NICToTuple(self, n))
2410

    
2411
      args["nics"] = nics
2412

    
2413
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2414
    if self.op.disk_template:
2415
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2416
    if self.op.runtime_mem:
2417
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2418

    
2419
    return env
2420

    
2421
  def BuildHooksNodes(self):
2422
    """Build hooks nodes.
2423

2424
    """
2425
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2426
    return (nl, nl)
2427

    
2428
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2429
                              old_params, cluster, pnode):
2430

    
2431
    update_params_dict = dict([(key, params[key])
2432
                               for key in constants.NICS_PARAMETERS
2433
                               if key in params])
2434

    
2435
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2436
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2437

    
2438
    new_net_uuid = None
2439
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2440
    if new_net_uuid_or_name:
2441
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2442
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2443

    
2444
    if old_net_uuid:
2445
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2446

    
2447
    if new_net_uuid:
2448
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2449
      if not netparams:
2450
        raise errors.OpPrereqError("No netparams found for the network"
2451
                                   " %s, probably not connected" %
2452
                                   new_net_obj.name, errors.ECODE_INVAL)
2453
      new_params = dict(netparams)
2454
    else:
2455
      new_params = GetUpdatedParams(old_params, update_params_dict)
2456

    
2457
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2458

    
2459
    new_filled_params = cluster.SimpleFillNIC(new_params)
2460
    objects.NIC.CheckParameterSyntax(new_filled_params)
2461

    
2462
    new_mode = new_filled_params[constants.NIC_MODE]
2463
    if new_mode == constants.NIC_MODE_BRIDGED:
2464
      bridge = new_filled_params[constants.NIC_LINK]
2465
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2466
      if msg:
2467
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2468
        if self.op.force:
2469
          self.warn.append(msg)
2470
        else:
2471
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2472

    
2473
    elif new_mode == constants.NIC_MODE_ROUTED:
2474
      ip = params.get(constants.INIC_IP, old_ip)
2475
      if ip is None:
2476
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2477
                                   " on a routed NIC", errors.ECODE_INVAL)
2478

    
2479
    elif new_mode == constants.NIC_MODE_OVS:
2480
      # TODO: check OVS link
2481
      self.LogInfo("OVS links are currently not checked for correctness")
2482

    
2483
    if constants.INIC_MAC in params:
2484
      mac = params[constants.INIC_MAC]
2485
      if mac is None:
2486
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2487
                                   errors.ECODE_INVAL)
2488
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2489
        # otherwise generate the MAC address
2490
        params[constants.INIC_MAC] = \
2491
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2492
      else:
2493
        # or validate/reserve the current one
2494
        try:
2495
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2496
        except errors.ReservationError:
2497
          raise errors.OpPrereqError("MAC address '%s' already in use"
2498
                                     " in cluster" % mac,
2499
                                     errors.ECODE_NOTUNIQUE)
2500
    elif new_net_uuid != old_net_uuid:
2501

    
2502
      def get_net_prefix(net_uuid):
2503
        mac_prefix = None
2504
        if net_uuid:
2505
          nobj = self.cfg.GetNetwork(net_uuid)
2506
          mac_prefix = nobj.mac_prefix
2507

    
2508
        return mac_prefix
2509

    
2510
      new_prefix = get_net_prefix(new_net_uuid)
2511
      old_prefix = get_net_prefix(old_net_uuid)
2512
      if old_prefix != new_prefix:
2513
        params[constants.INIC_MAC] = \
2514
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2515

    
2516
    # if there is a change in (ip, network) tuple
2517
    new_ip = params.get(constants.INIC_IP, old_ip)
2518
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2519
      if new_ip:
2520
        # if IP is pool then require a network and generate one IP
2521
        if new_ip.lower() == constants.NIC_IP_POOL:
2522
          if new_net_uuid:
2523
            try:
2524
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2525
            except errors.ReservationError:
2526
              raise errors.OpPrereqError("Unable to get a free IP"
2527
                                         " from the address pool",
2528
                                         errors.ECODE_STATE)
2529
            self.LogInfo("Chose IP %s from network %s",
2530
                         new_ip,
2531
                         new_net_obj.name)
2532
            params[constants.INIC_IP] = new_ip
2533
          else:
2534
            raise errors.OpPrereqError("ip=pool, but no network found",
2535
                                       errors.ECODE_INVAL)
2536
        # Reserve new IP if in the new network if any
2537
        elif new_net_uuid:
2538
          try:
2539
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2540
                               check=self.op.conflicts_check)
2541
            self.LogInfo("Reserving IP %s in network %s",
2542
                         new_ip, new_net_obj.name)
2543
          except errors.ReservationError:
2544
            raise errors.OpPrereqError("IP %s not available in network %s" %
2545
                                       (new_ip, new_net_obj.name),
2546
                                       errors.ECODE_NOTUNIQUE)
2547
        # new network is None so check if new IP is a conflicting IP
2548
        elif self.op.conflicts_check:
2549
          _CheckForConflictingIp(self, new_ip, pnode)
2550

    
2551
      # release old IP if old network is not None
2552
      if old_ip and old_net_uuid:
2553
        try:
2554
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2555
        except errors.AddressPoolError:
2556
          logging.warning("Release IP %s not contained in network %s",
2557
                          old_ip, old_net_obj.name)
2558

    
2559
    # there are no changes in (ip, network) tuple and old network is not None
2560
    elif (old_net_uuid is not None and
2561
          (req_link is not None or req_mode is not None)):
2562
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2563
                                 " a NIC that is connected to a network",
2564
                                 errors.ECODE_INVAL)
2565

    
2566
    private.params = new_params
2567
    private.filled = new_filled_params
2568

    
2569
  def _PreCheckDiskTemplate(self, pnode_info):
2570
    """CheckPrereq checks related to a new disk template."""
2571
    # Arguments are passed to avoid configuration lookups
2572
    instance = self.instance
2573
    pnode = instance.primary_node
2574
    cluster = self.cluster
2575
    if instance.disk_template == self.op.disk_template:
2576
      raise errors.OpPrereqError("Instance already has disk template %s" %
2577
                                 instance.disk_template, errors.ECODE_INVAL)
2578

    
2579
    if (instance.disk_template,
2580
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2581
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2582
                                 " %s to %s" % (instance.disk_template,
2583
                                                self.op.disk_template),
2584
                                 errors.ECODE_INVAL)
2585
    CheckInstanceState(self, instance, INSTANCE_DOWN,
2586
                       msg="cannot change disk template")
2587
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2588
      if self.op.remote_node == pnode:
2589
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2590
                                   " as the primary node of the instance" %
2591
                                   self.op.remote_node, errors.ECODE_STATE)
2592
      CheckNodeOnline(self, self.op.remote_node)
2593
      CheckNodeNotDrained(self, self.op.remote_node)
2594
      # FIXME: here we assume that the old instance type is DT_PLAIN
2595
      assert instance.disk_template == constants.DT_PLAIN
2596
      disks = [{constants.IDISK_SIZE: d.size,
2597
                constants.IDISK_VG: d.logical_id[0]}
2598
               for d in instance.disks]
2599
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2600
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2601

    
2602
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2603
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2604
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2605
                                                              snode_group)
2606
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2607
                             ignore=self.op.ignore_ipolicy)
2608
      if pnode_info.group != snode_info.group:
2609
        self.LogWarning("The primary and secondary nodes are in two"
2610
                        " different node groups; the disk parameters"
2611
                        " from the first disk's node group will be"
2612
                        " used")
2613

    
2614
    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2615
      # Make sure none of the nodes require exclusive storage
2616
      nodes = [pnode_info]
2617
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2618
        assert snode_info
2619
        nodes.append(snode_info)
2620
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2621
      if compat.any(map(has_es, nodes)):
2622
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2623
                  " storage is enabled" % (instance.disk_template,
2624
                                           self.op.disk_template))
2625
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2626

    
2627
  def CheckPrereq(self):
2628
    """Check prerequisites.
2629

2630
    This checks the requested parameter, disk and NIC modifications against
    the instance's current configuration and the state of the cluster.
2631

2632
    """
2633
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2634
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2635

    
2636
    cluster = self.cluster = self.cfg.GetClusterInfo()
2637
    assert self.instance is not None, \
2638
      "Cannot retrieve locked instance %s" % self.op.instance_name
2639

    
2640
    pnode = instance.primary_node
2641

    
2642
    self.warn = []
2643

    
2644
    if (self.op.pnode is not None and self.op.pnode != pnode and
2645
        not self.op.force):
2646
      # verify that the instance is not up
2647
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
2648
                                                  instance.hypervisor)
2649
      if instance_info.fail_msg:
2650
        self.warn.append("Can't get instance runtime information: %s" %
2651
                         instance_info.fail_msg)
2652
      elif instance_info.payload:
2653
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2654
                                   errors.ECODE_STATE)
2655

    
2656
    assert pnode in self.owned_locks(locking.LEVEL_NODE)
2657
    nodelist = list(instance.all_nodes)
2658
    pnode_info = self.cfg.GetNodeInfo(pnode)
2659
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2660

    
2661
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2662
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2663
    group_info = self.cfg.GetNodeGroup(pnode_info.group)
2664

    
2665
    # dictionary with instance information after the modification
2666
    ispec = {}
2667

    
2668
    if self.op.hotplug:
2669
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
2670
                                               self.instance)
2671
      result.Raise("Hotplug is not supported.")
2672

    
2673
    # Check disk modifications. This is done here and not in CheckArguments
2674
    # (as with NICs), because we need to know the instance's disk template
2675
    if instance.disk_template == constants.DT_EXT:
2676
      self._CheckMods("disk", self.op.disks, {},
2677
                      self._VerifyDiskModification)
2678
    else:
2679
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2680
                      self._VerifyDiskModification)
2681

    
2682
    # Prepare disk/NIC modifications
2683
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
2684
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2685

    
2686
    # Check the validity of the `provider' parameter
2687
    if instance.disk_template == constants.DT_EXT:
2688
      for mod in self.diskmod:
2689
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2690
        if mod[0] == constants.DDM_ADD:
2691
          if ext_provider is None:
2692
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
2693
                                       " '%s' missing, during disk add" %
2694
                                       (constants.DT_EXT,
2695
                                        constants.IDISK_PROVIDER),
2696
                                       errors.ECODE_NOENT)
2697
        elif mod[0] == constants.DDM_MODIFY:
2698
          if ext_provider:
2699
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2700
                                       " modification" %
2701
                                       constants.IDISK_PROVIDER,
2702
                                       errors.ECODE_INVAL)
2703
    else:
2704
      for mod in self.diskmod:
2705
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2706
        if ext_provider is not None:
2707
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
2708
                                     " instances of type '%s'" %
2709
                                     (constants.IDISK_PROVIDER,
2710
                                      constants.DT_EXT),
2711
                                     errors.ECODE_INVAL)
2712

    
2713
    # OS change
2714
    if self.op.os_name and not self.op.force:
2715
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2716
                     self.op.force_variant)
2717
      instance_os = self.op.os_name
2718
    else:
2719
      instance_os = instance.os
2720

    
2721
    assert not (self.op.disk_template and self.op.disks), \
2722
      "Can't modify disk template and apply disk changes at the same time"
2723

    
2724
    if self.op.disk_template:
2725
      self._PreCheckDiskTemplate(pnode_info)
2726

    
2727
    # hvparams processing
2728
    if self.op.hvparams:
2729
      hv_type = instance.hypervisor
2730
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2731
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2732
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2733

    
2734
      # local check
2735
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2736
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2737
      self.hv_proposed = self.hv_new = hv_new # the new actual values
2738
      self.hv_inst = i_hvdict # the new dict (without defaults)
2739
    else:
2740
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2741
                                              instance.hvparams)
2742
      self.hv_new = self.hv_inst = {}
2743

    
2744
    # beparams processing
2745
    if self.op.beparams:
2746
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2747
                                  use_none=True)
2748
      objects.UpgradeBeParams(i_bedict)
2749
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2750
      be_new = cluster.SimpleFillBE(i_bedict)
2751
      self.be_proposed = self.be_new = be_new # the new actual values
2752
      self.be_inst = i_bedict # the new dict (without defaults)
2753
    else:
2754
      self.be_new = self.be_inst = {}
2755
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2756
    be_old = cluster.FillBE(instance)
2757

    
2758
    # CPU param validation -- checking every time a parameter is
2759
    # changed to cover all cases where either CPU mask or vcpus have
2760
    # changed
2761
    if (constants.BE_VCPUS in self.be_proposed and
2762
        constants.HV_CPU_MASK in self.hv_proposed):
2763
      cpu_list = \
2764
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2765
      # Verify mask is consistent with number of vCPUs. Can skip this
2766
      # test if only 1 entry in the CPU mask, which means same mask
2767
      # is applied to all vCPUs.
2768
      if (len(cpu_list) > 1 and
2769
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2770
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2771
                                   " CPU mask [%s]" %
2772
                                   (self.be_proposed[constants.BE_VCPUS],
2773
                                    self.hv_proposed[constants.HV_CPU_MASK]),
2774
                                   errors.ECODE_INVAL)
2775

    
2776
      # Only perform this test if a new CPU mask is given
2777
      if constants.HV_CPU_MASK in self.hv_new:
2778
        # Calculate the largest CPU number requested
2779
        max_requested_cpu = max(map(max, cpu_list))
2780
        # Check that all of the instance's nodes have enough physical CPUs to
2781
        # satisfy the requested CPU mask
2782
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2783
                                max_requested_cpu + 1, instance.hypervisor)
2784

    
2785
    # osparams processing
2786
    if self.op.osparams:
2787
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2788
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2789
      self.os_inst = i_osdict # the new dict (without defaults)
2790
    else:
2791
      self.os_inst = {}
2792

    
2793
    #TODO(dynmem): do the appropriate check involving MINMEM
2794
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2795
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2796
      mem_check_list = [pnode]
2797
      if be_new[constants.BE_AUTO_BALANCE]:
2798
        # either we changed auto_balance to yes or it was from before
2799
        mem_check_list.extend(instance.secondary_nodes)
2800
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
2801
                                                  instance.hypervisor)
2802
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2803
                                         [instance.hypervisor], False)
2804
      pninfo = nodeinfo[pnode]
2805
      msg = pninfo.fail_msg
2806
      if msg:
2807
        # Assume the primary node is unreachable and go ahead
2808
        self.warn.append("Can't get info from primary node %s: %s" %
2809
                         (pnode, msg))
2810
      else:
2811
        (_, _, (pnhvinfo, )) = pninfo.payload
2812
        if not isinstance(pnhvinfo.get("memory_free", None), int):
2813
          self.warn.append("Node data from primary node %s doesn't contain"
2814
                           " free memory information" % pnode)
2815
        elif instance_info.fail_msg:
2816
          self.warn.append("Can't get instance runtime information: %s" %
2817
                           instance_info.fail_msg)
2818
        else:
2819
          if instance_info.payload:
2820
            current_mem = int(instance_info.payload["memory"])
2821
          else:
2822
            # Assume instance not running
2823
            # (there is a slight race condition here, but it's not very
2824
            # probable, and we have no other way to check)
2825
            # TODO: Describe race condition
2826
            current_mem = 0
2827
          #TODO(dynmem): do the appropriate check involving MINMEM
2828
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2829
                      pnhvinfo["memory_free"])
2830
          if miss_mem > 0:
2831
            raise errors.OpPrereqError("This change will prevent the instance"
2832
                                       " from starting, due to %d MB of memory"
2833
                                       " missing on its primary node" %
2834
                                       miss_mem, errors.ECODE_NORES)
2835

    
2836
      if be_new[constants.BE_AUTO_BALANCE]:
2837
        for node, nres in nodeinfo.items():
2838
          if node not in instance.secondary_nodes:
2839
            continue
2840
          nres.Raise("Can't get info from secondary node %s" % node,
2841
                     prereq=True, ecode=errors.ECODE_STATE)
2842
          (_, _, (nhvinfo, )) = nres.payload
2843
          if not isinstance(nhvinfo.get("memory_free", None), int):
2844
            raise errors.OpPrereqError("Secondary node %s didn't return free"
2845
                                       " memory information" % node,
2846
                                       errors.ECODE_STATE)
2847
          #TODO(dynmem): do the appropriate check involving MINMEM
2848
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2849
            raise errors.OpPrereqError("This change will prevent the instance"
2850
                                       " from failover to its secondary node"
2851
                                       " %s, due to not enough memory" % node,
2852
                                       errors.ECODE_STATE)
2853

    
2854
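    # Runtime memory (ballooning) changes require a running instance and,
    # unless --force is given, a value within the MINMEM..MAXMEM range.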
    if self.op.runtime_mem:
2855
      remote_info = self.rpc.call_instance_info(instance.primary_node,
2856
                                                instance.name,
2857
                                                instance.hypervisor)
2858
      remote_info.Raise("Error checking node %s" % instance.primary_node)
2859
      if not remote_info.payload: # not running already
2860
        raise errors.OpPrereqError("Instance %s is not running" %
2861
                                   instance.name, errors.ECODE_STATE)
2862

    
2863
      current_memory = remote_info.payload["memory"]
2864
      if (not self.op.force and
2865
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2866
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2867
        raise errors.OpPrereqError("Instance %s must have memory between %d"
2868
                                   " and %d MB of memory unless --force is"
2869
                                   " given" %
2870
                                   (instance.name,
2871
                                    self.be_proposed[constants.BE_MINMEM],
2872
                                    self.be_proposed[constants.BE_MAXMEM]),
2873
                                   errors.ECODE_INVAL)
2874

    
2875
      delta = self.op.runtime_mem - current_memory
2876
      if delta > 0:
2877
        CheckNodeFreeMemory(self, instance.primary_node,
2878
                            "ballooning memory for instance %s" %
2879
                            instance.name, delta, instance.hypervisor)
2880

    
2881
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2882
      raise errors.OpPrereqError("Disk operations not supported for"
2883
                                 " diskless instances", errors.ECODE_INVAL)
2884

    
2885
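    # Callbacks for _ApplyContainerMods below; at this stage they only
    # validate the requested NIC changes and reserve/release MACs and IPs,
    # without modifying the instance configuration itself.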
    def _PrepareNicCreate(_, params, private):
2886
      self._PrepareNicModification(params, private, None, None,
2887
                                   {}, cluster, pnode)
2888
      return (None, None)
2889

    
2890
    def _PrepareNicMod(_, nic, params, private):
2891
      self._PrepareNicModification(params, private, nic.ip, nic.network,
2892
                                   nic.nicparams, cluster, pnode)
2893
      return None
2894

    
2895
    def _PrepareNicRemove(_, params, __):
2896
      ip = params.ip
2897
      net = params.network
2898
      if net is not None and ip is not None:
2899
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2900

    
2901
    # Verify NIC changes (operating on copy)
2902
    nics = instance.nics[:]
2903
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
2904
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2905
    if len(nics) > constants.MAX_NICS:
2906
      raise errors.OpPrereqError("Instance has too many network interfaces"
2907
                                 " (%d), cannot add more" % constants.MAX_NICS,
2908
                                 errors.ECODE_STATE)
2909

    
2910
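    # For the prereq check only the (optional) new disk name is applied, so
    # that utils.ValidateDeviceNames below can verify name uniqueness.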
    def _PrepareDiskMod(_, disk, params, __):
2911
      disk.name = params.get(constants.IDISK_NAME, None)
2912

    
2913
    # Verify disk changes (operating on a copy)
2914
    disks = copy.deepcopy(instance.disks)
2915
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2916
                        _PrepareDiskMod, None)
2917
    utils.ValidateDeviceNames("disk", disks)
2918
    if len(disks) > constants.MAX_DISKS:
2919
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2920
                                 " more" % constants.MAX_DISKS,
2921
                                 errors.ECODE_STATE)
2922
    disk_sizes = [disk.size for disk in instance.disks]
2923
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
2924
                      self.diskmod if op == constants.DDM_ADD)
2925
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2926
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2927

    
2928
    if self.op.offline is not None and self.op.offline:
2929
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2930
                         msg="can't change to offline")
2931

    
2932
    # Pre-compute NIC changes (necessary to use result in hooks)
2933
    self._nic_chgdesc = []
2934
    if self.nicmod:
2935
      # Operate on copies as this is still in prereq
2936
      nics = [nic.Copy() for nic in instance.nics]
2937
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2938
                          self._CreateNewNic, self._ApplyNicMods,
2939
                          self._RemoveNic)
2940
      # Verify that NIC names are unique and valid
2941
      utils.ValidateDeviceNames("NIC", nics)
2942
      self._new_nics = nics
2943
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2944
    else:
2945
      self._new_nics = None
2946
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
2947

    
2948
    if not self.op.ignore_ipolicy:
2949
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2950
                                                              group_info)
2951

    
2952
      # Fill ispec with backend parameters
2953
      ispec[constants.ISPEC_SPINDLE_USE] = \
2954
        self.be_new.get(constants.BE_SPINDLE_USE, None)
2955
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
2956
                                                         None)
2957

    
2958
      # Copy ispec to verify parameters with min/max values separately
2959
      if self.op.disk_template:
2960
        new_disk_template = self.op.disk_template
2961
      else:
2962
        new_disk_template = instance.disk_template
2963
      ispec_max = ispec.copy()
2964
      ispec_max[constants.ISPEC_MEM_SIZE] = \
2965
        self.be_new.get(constants.BE_MAXMEM, None)
2966
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
2967
                                                     new_disk_template)
2968
      ispec_min = ispec.copy()
2969
      ispec_min[constants.ISPEC_MEM_SIZE] = \
2970
        self.be_new.get(constants.BE_MINMEM, None)
2971
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
2972
                                                     new_disk_template)
2973

    
2974
      if res_max or res_min:
2975
        # FIXME: Improve error message by including information about whether
2976
        # the upper or lower limit of the parameter fails the ipolicy.
2977
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
2978
               (group_info, group_info.name,
2979
                utils.CommaJoin(set(res_max + res_min))))
2980
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2981

    
2982
  def _ConvertPlainToDrbd(self, feedback_fn):
2983
    """Converts an instance from plain to drbd.
2984

2985
    """
2986
    feedback_fn("Converting template to drbd")
2987
    instance = self.instance
2988
    pnode = instance.primary_node
2989
    snode = self.op.remote_node
2990

    
2991
    assert instance.disk_template == constants.DT_PLAIN
2992

    
2993
    # create a fake disk info for _GenerateDiskTemplate
2994
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
2995
                  constants.IDISK_VG: d.logical_id[0],
2996
                  constants.IDISK_NAME: d.name}
2997
                 for d in instance.disks]
2998
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
2999
                                     instance.name, pnode, [snode],
3000
                                     disk_info, None, None, 0, feedback_fn,
3001
                                     self.diskparams)
3002
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3003
                                        self.diskparams)
3004
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
3005
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
3006
    info = GetInstanceInfoText(instance)
3007
    feedback_fn("Creating additional volumes...")
3008
    # first, create the missing data and meta devices
3009
    for disk in anno_disks:
3010
      # unfortunately this is... not too nice
3011
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
3012
                           info, True, p_excl_stor)
3013
      for child in disk.children:
3014
        CreateSingleBlockDev(self, snode, instance, child, info, True,
3015
                             s_excl_stor)
3016
    # at this stage, all new LVs have been created, we can rename the
3017
    # old ones
3018
    feedback_fn("Renaming original volumes...")
3019
    rename_list = [(o, n.children[0].logical_id)
3020
                   for (o, n) in zip(instance.disks, new_disks)]
3021
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
3022
    result.Raise("Failed to rename original LVs")
3023

    
3024
    feedback_fn("Initializing DRBD devices...")
3025
    # all child devices are in place, we can now create the DRBD devices
3026
    try:
3027
      for disk in anno_disks:
3028
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3029
          f_create = node == pnode
3030
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3031
                               excl_stor)
3032
    except errors.GenericError, e:
3033
      feedback_fn("Initializing of DRBD devices failed;"
3034
                  " renaming back original volumes...")
3035
      for disk in new_disks:
3036
        self.cfg.SetDiskID(disk, pnode)
3037
      rename_back_list = [(n.children[0], o.logical_id)
3038
                          for (n, o) in zip(new_disks, instance.disks)]
3039
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3040
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3041
      raise
3042

    
3043
    # at this point, the instance has been modified
3044
    instance.disk_template = constants.DT_DRBD8
3045
    instance.disks = new_disks
3046
    self.cfg.Update(instance, feedback_fn)
3047

    
3048
    # Release node locks while waiting for sync
3049
    ReleaseLocks(self, locking.LEVEL_NODE)
3050

    
3051
    # disks are created, waiting for sync
3052
    disk_abort = not WaitForSync(self, instance,
3053
                                 oneshot=not self.op.wait_for_sync)
3054
    if disk_abort:
3055
      raise errors.OpExecError("There are some degraded disks for"
3056
                               " this instance, please cleanup manually")
3057

    
3058
    # Node resource locks will be released by caller
3059

    
3060
  def _ConvertDrbdToPlain(self, feedback_fn):
3061
    """Converts an instance from drbd to plain.
3062

3063
    """
3064
    instance = self.instance
3065

    
3066
    assert len(instance.secondary_nodes) == 1
3067
    assert instance.disk_template == constants.DT_DRBD8
3068

    
3069
    pnode = instance.primary_node
3070
    snode = instance.secondary_nodes[0]
3071
    feedback_fn("Converting template to plain")
3072

    
3073
    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3074
    new_disks = [d.children[0] for d in instance.disks]
3075

    
3076
    # copy over size, mode and name
3077
    for parent, child in zip(old_disks, new_disks):
3078
      child.size = parent.size
3079
      child.mode = parent.mode
3080
      child.name = parent.name
3081

    
3082
    # this is a DRBD disk, return its port to the pool
3083
    # NOTE: this must be done right before the call to cfg.Update!
3084
    for disk in old_disks:
3085
      tcp_port = disk.logical_id[2]
3086
      self.cfg.AddTcpUdpPort(tcp_port)
3087

    
3088
    # update instance structure
3089
    instance.disks = new_disks
3090
    instance.disk_template = constants.DT_PLAIN
3091
    _UpdateIvNames(0, instance.disks)
3092
    self.cfg.Update(instance, feedback_fn)
3093

    
3094
    # Release locks in case removing disks takes a while
3095
    ReleaseLocks(self, locking.LEVEL_NODE)
3096

    
3097
    feedback_fn("Removing volumes on the secondary node...")
3098
    for disk in old_disks:
3099
      self.cfg.SetDiskID(disk, snode)
3100
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3101
      if msg:
3102
        self.LogWarning("Could not remove block device %s on node %s,"
3103
                        " continuing anyway: %s", disk.iv_name, snode, msg)
3104

    
3105
    feedback_fn("Removing unneeded volumes on the primary node...")
3106
    for idx, disk in enumerate(old_disks):
3107
      meta = disk.children[1]
3108
      self.cfg.SetDiskID(meta, pnode)
3109
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3110
      if msg:
3111
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
3112
                        " continuing anyway: %s", idx, pnode, msg)
3113

    
3114
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
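    """Performs a single device hotplug operation on the primary node.

    Failures are only logged as warnings; execution continues either way.

    """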
3115
    self.LogInfo("Trying to hotplug device...")
3116
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
3117
                                          self.instance, action, dev_type,
3118
                                          (device, self.instance),
3119
                                          extra, seq)
3120
    if result.fail_msg:
3121
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3122
      self.LogInfo("Continuing execution..")
3123
    else:
3124
      self.LogInfo("Hotplug done.")
3125

    
3126
  def _CreateNewDisk(self, idx, params, _):
3127
    """Creates a new disk.
3128

3129
    """
3130
    instance = self.instance
3131

    
3132
    # add a new disk
3133
    if instance.disk_template in constants.DTS_FILEBASED:
3134
      (file_driver, file_path) = instance.disks[0].logical_id
3135
      file_path = os.path.dirname(file_path)
3136
    else:
3137
      file_driver = file_path = None
3138

    
3139
    disk = \
3140
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
3141
                           instance.primary_node, instance.secondary_nodes,
3142
                           [params], file_path, file_driver, idx,
3143
                           self.Log, self.diskparams)[0]
3144

    
3145
    new_disks = CreateDisks(self, instance, disks=[disk])
3146

    
3147
    if self.cluster.prealloc_wipe_disks:
3148
      # Wipe new disk
3149
      WipeOrCleanupDisks(self, instance,
3150
                         disks=[(idx, disk, 0)],
3151
                         cleanup=new_disks)
3152

    
3153
    if self.op.hotplug:
3154
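      # Assemble the newly created disk on the primary node; on success, the
      # returned link name is used to hot-add the disk to the running
      # instance.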
      # _, device_info = AssembleInstanceDisks(self, self.instance,
3155
      #                                       [disk], check=False)
3156
      self.cfg.SetDiskID(disk, self.instance.primary_node)
3157
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3158
                                               (disk, self.instance),
3159
                                               self.instance.name, True, idx)
3160
      if result.fail_msg:
3161
        self.LogWarning("Can't assemble newly created disk %d: %s",
3162
                        idx, result.fail_msg)
3163
      else:
3164
        # _, _, dev_path = device_info[0]
3165
        _, link_name = result.payload
3166
        self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3167
                            constants.HOTPLUG_TARGET_DISK,
3168
                            disk, link_name, idx)
3169

    
3170
    return (disk, [
3171
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3172
      ])
3173

    
3174
  @staticmethod
3175
  def _ModifyDisk(idx, disk, params, _):
3176
    """Modifies a disk.
3177

3178
    """
3179
    changes = []
3180
    mode = params.get(constants.IDISK_MODE, None)
3181
    if mode:
3182
      disk.mode = mode
3183
      changes.append(("disk.mode/%d" % idx, disk.mode))
3184

    
3185
    name = params.get(constants.IDISK_NAME, None)
3186
    disk.name = name
3187
    changes.append(("disk.name/%d" % idx, disk.name))
3188

    
3189
    return changes
3190

    
3191
  def _RemoveDisk(self, idx, root, _):
3192
    """Removes a disk.
3193

3194
    """
3195
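    # When hotplug is requested, detach the disk from the running instance
    # and shut it down before removing the underlying block devices.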
    if self.op.hotplug:
3196
      self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3197
                          constants.HOTPLUG_TARGET_DISK,
3198
                          root, None, idx)
3199
      ShutdownInstanceDisks(self, self.instance, [root])
3200

    
3201
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3202
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3203
      self.cfg.SetDiskID(disk, node)
3204
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3205
      if msg:
3206
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3207
                        " continuing anyway", idx, node, msg)
3208

    
3209
    # if this is a DRBD disk, return its port to the pool
3210
    if root.dev_type in constants.LDS_DRBD:
3211
      self.cfg.AddTcpUdpPort(root.logical_id[2])
3212

    
3213
  def _CreateNewNic(self, idx, params, private):
3214
    """Creates data structure for a new network interface.
3215

3216
    """
3217
    mac = params[constants.INIC_MAC]
3218
    ip = params.get(constants.INIC_IP, None)
3219
    net = params.get(constants.INIC_NETWORK, None)
3220
    name = params.get(constants.INIC_NAME, None)
3221
    net_uuid = self.cfg.LookupNetwork(net)
3222
    #TODO: not private.filled?? can a nic have no nicparams??
3223
    nicparams = private.filled
3224
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3225
                       nicparams=nicparams)
3226
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3227

    
3228
    if self.op.hotplug:
3229
      self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3230
                          constants.HOTPLUG_TARGET_NIC,
3231
                          nobj, None, idx)
3232

    
3233
    desc = [
3234
      ("nic.%d" % idx,
3235
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3236
       (mac, ip, private.filled[constants.NIC_MODE],
3237
       private.filled[constants.NIC_LINK], net)),
3238
      ]
3239

    
3240
    return (nobj, desc)
3241

    
3242
  def _ApplyNicMods(self, idx, nic, params, private):
3243
    """Modifies a network interface.
3244

3245
    """
3246
    changes = []
3247

    
3248
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3249
      if key in params:
3250
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3251
        setattr(nic, key, params[key])
3252

    
3253
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3254
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3255
    if new_net_uuid != nic.network:
3256
      changes.append(("nic.network/%d" % idx, new_net))
3257
      nic.network = new_net_uuid
3258

    
3259
    if private.filled:
3260
      nic.nicparams = private.filled
3261

    
3262
      for (key, val) in nic.nicparams.items():
3263
        changes.append(("nic.%s/%d" % (key, idx), val))
3264

    
3265
    if self.op.hotplug:
3266
      self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3267
                          constants.HOTPLUG_TARGET_NIC,
3268
                          nic, None, idx)
3269

    
3270
    return changes
3271

    
3272
  def _RemoveNic(self, idx, nic, _):
3273
    if self.op.hotplug:
3274
      self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3275
                          constants.HOTPLUG_TARGET_NIC,
3276
                          nic, None, idx)
3277

    
3278
  def Exec(self, feedback_fn):
3279
    """Modifies an instance.
3280

3281
    All parameters take effect only at the next restart of the instance.
3282

3283
    """
3284
    # Process here the warnings from CheckPrereq, as we don't have a
3285
    # feedback_fn there.
3286
    # TODO: Replace with self.LogWarning
3287
    for warn in self.warn:
3288
      feedback_fn("WARNING: %s" % warn)
3289

    
3290
    assert ((self.op.disk_template is None) ^
3291
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3292
      "Not owning any node resource locks"
3293

    
3294
    result = []
3295
    instance = self.instance
3296

    
3297
    # New primary node
3298
    if self.op.pnode:
3299
      instance.primary_node = self.op.pnode
3300

    
3301
    # runtime memory
3302
    if self.op.runtime_mem:
3303
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3304
                                                     instance,
3305
                                                     self.op.runtime_mem)
3306
      rpcres.Raise("Cannot modify instance runtime memory")
3307
      result.append(("runtime_memory", self.op.runtime_mem))
3308

    
3309
    # Apply disk changes
3310
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3311
                        self._CreateNewDisk, self._ModifyDisk,
3312
                        self._RemoveDisk)
3313
    _UpdateIvNames(0, instance.disks)
3314

    
3315
    if self.op.disk_template:
3316
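      # Sanity check: the conversion needs node and node resource locks on
      # all nodes of the instance (plus the new secondary, if any).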
      if __debug__:
3317
        check_nodes = set(instance.all_nodes)
3318
        if self.op.remote_node:
3319
          check_nodes.add(self.op.remote_node)
3320
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3321
          owned = self.owned_locks(level)
3322
          assert not (check_nodes - owned), \
3323
            ("Not owning the correct locks, owning %r, expected at least %r" %
3324
             (owned, check_nodes))
3325

    
3326
      r_shut = ShutdownInstanceDisks(self, instance)
3327
      if not r_shut:
3328
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3329
                                 " proceed with disk template conversion")
3330
      mode = (instance.disk_template, self.op.disk_template)
3331
      try:
3332
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
3333
      except:
3334
        self.cfg.ReleaseDRBDMinors(instance.name)
3335
        raise
3336
      result.append(("disk_template", self.op.disk_template))
3337

    
3338
      assert instance.disk_template == self.op.disk_template, \
3339
        ("Expected disk template '%s', found '%s'" %
3340
         (self.op.disk_template, instance.disk_template))
3341

    
3342
    # Release node and resource locks if there are any (they might already have
3343
    # been released during disk conversion)
3344
    ReleaseLocks(self, locking.LEVEL_NODE)
3345
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
3346

    
3347
    # Apply NIC changes
3348
    if self._new_nics is not None:
3349
      instance.nics = self._new_nics
3350
      result.extend(self._nic_chgdesc)
3351

    
3352
    # hvparams changes
3353
    if self.op.hvparams:
3354
      instance.hvparams = self.hv_inst
3355
      for key, val in self.op.hvparams.iteritems():
3356
        result.append(("hv/%s" % key, val))
3357

    
3358
    # beparams changes
3359
    if self.op.beparams:
3360
      instance.beparams = self.be_inst
3361
      for key, val in self.op.beparams.iteritems():
3362
        result.append(("be/%s" % key, val))
3363

    
3364
    # OS change
3365
    if self.op.os_name:
3366
      instance.os = self.op.os_name
3367

    
3368
    # osparams changes
3369
    if self.op.osparams:
3370
      instance.osparams = self.os_inst
3371
      for key, val in self.op.osparams.iteritems():
3372
        result.append(("os/%s" % key, val))
3373

    
3374
    if self.op.offline is None:
3375
      # Ignore
3376
      pass
3377
    elif self.op.offline:
3378
      # Mark instance as offline
3379
      self.cfg.MarkInstanceOffline(instance.name)
3380
      result.append(("admin_state", constants.ADMINST_OFFLINE))
3381
    else:
3382
      # Mark instance as online, but stopped
3383
      self.cfg.MarkInstanceDown(instance.name)
3384
      result.append(("admin_state", constants.ADMINST_DOWN))
3385

    
3386
    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3387

    
3388
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3389
                self.owned_locks(locking.LEVEL_NODE)), \
3390
      "All node locks should have been released by now"
3391

    
3392
    return result
3393

    
3394
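  # Supported disk template conversions: maps an (old, new) template pair
  # to the method implementing that conversion.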
  _DISK_CONVERSIONS = {
3395
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3396
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3397
    }
3398

    
3399

    
3400
class LUInstanceChangeGroup(LogicalUnit):
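  """Moves an instance to another node group.

  The target group is chosen via an iallocator; the actual moves are
  returned as follow-up jobs.

  """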
3401
  HPATH = "instance-change-group"
3402
  HTYPE = constants.HTYPE_INSTANCE
3403
  REQ_BGL = False
3404

    
3405
  def ExpandNames(self):
3406
    self.share_locks = ShareAll()
3407

    
3408
    self.needed_locks = {
3409
      locking.LEVEL_NODEGROUP: [],
3410
      locking.LEVEL_NODE: [],
3411
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3412
      }
3413

    
3414
    self._ExpandAndLockInstance()
3415

    
3416
    if self.op.target_groups:
3417
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3418
                                  self.op.target_groups)
3419
    else:
3420
      self.req_target_uuids = None
3421

    
3422
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3423

    
3424
  def DeclareLocks(self, level):
3425
    if level == locking.LEVEL_NODEGROUP:
3426
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3427

    
3428
      if self.req_target_uuids:
3429
        lock_groups = set(self.req_target_uuids)
3430

    
3431
        # Lock all groups used by instance optimistically; this requires going
3432
        # via the node before it's locked, requiring verification later on
3433
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3434
        lock_groups.update(instance_groups)
3435
      else:
3436
        # No target groups, need to lock all of them
3437
        lock_groups = locking.ALL_SET
3438

    
3439
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3440

    
3441
    elif level == locking.LEVEL_NODE:
3442
      if self.req_target_uuids:
3443
        # Lock all nodes used by instances
3444
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3445
        self._LockInstancesNodes()
3446

    
3447
        # Lock all nodes in all potential target groups
3448
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3449
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3450
        member_nodes = [node_name
3451
                        for group in lock_groups
3452
                        for node_name in self.cfg.GetNodeGroup(group).members]
3453
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3454
      else:
3455
        # Lock all nodes as all groups are potential targets
3456
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3457

    
3458
  def CheckPrereq(self):
3459
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3460
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3461
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3462

    
3463
    assert (self.req_target_uuids is None or
3464
            owned_groups.issuperset(self.req_target_uuids))
3465
    assert owned_instances == set([self.op.instance_name])
3466

    
3467
    # Get instance information
3468
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3469

    
3470
    # Check if node groups for locked instance are still correct
3471
    assert owned_nodes.issuperset(self.instance.all_nodes), \
3472
      ("Instance %s's nodes changed while we kept the lock" %
3473
       self.op.instance_name)
3474

    
3475
    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3476
                                          owned_groups)
3477

    
3478
    if self.req_target_uuids:
3479
      # User requested specific target groups
3480
      self.target_uuids = frozenset(self.req_target_uuids)
3481
    else:
3482
      # All groups except those used by the instance are potential targets
3483
      self.target_uuids = owned_groups - inst_groups
3484

    
3485
    conflicting_groups = self.target_uuids & inst_groups
3486
    if conflicting_groups:
3487
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3488
                                 " used by the instance '%s'" %
3489
                                 (utils.CommaJoin(conflicting_groups),
3490
                                  self.op.instance_name),
3491
                                 errors.ECODE_INVAL)
3492

    
3493
    if not self.target_uuids:
3494
      raise errors.OpPrereqError("There are no possible target groups",
3495
                                 errors.ECODE_INVAL)
3496

    
3497
  def BuildHooksEnv(self):
3498
    """Build hooks env.
3499

3500
    """
3501
    assert self.target_uuids
3502

    
3503
    env = {
3504
      "TARGET_GROUPS": " ".join(self.target_uuids),
3505
      }
3506

    
3507
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
3508

    
3509
    return env
3510

    
3511
  def BuildHooksNodes(self):
3512
    """Build hooks nodes.
3513

3514
    """
3515
    mn = self.cfg.GetMasterNode()
3516
    return ([mn], [mn])
3517

    
3518
  def Exec(self, feedback_fn):
3519
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3520

    
3521
    assert instances == [self.op.instance_name], "Instance not locked"
3522

    
3523
    req = iallocator.IAReqGroupChange(instances=instances,
3524
                                      target_groups=list(self.target_uuids))
3525
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3526

    
3527
    ial.Run(self.op.iallocator)
3528

    
3529
    if not ial.success:
3530
      raise errors.OpPrereqError("Can't compute solution for changing group of"
3531
                                 " instance '%s' using iallocator '%s': %s" %
3532
                                 (self.op.instance_name, self.op.iallocator,
3533
                                  ial.info), errors.ECODE_NORES)
3534

    
3535
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3536

    
3537
    self.LogInfo("Iallocator returned %s job(s) for changing group of"
3538
                 " instance '%s'", len(jobs), self.op.instance_name)
3539

    
3540
    return ResultWithJobs(jobs)