#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
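# A value matching this type is either None or a list of two-element entries,
# e.g. (hypothetical values) [("mode", "bridged"), ("link", None)].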


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
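  # Example with hypothetical values: the name "inst1" resolving to
  # "inst1.example.com" passes the check below, while a resolution to an
  # unrelated FQDN raises OpPrereqError.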
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up NICs

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

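    # Fill the partial NIC parameters with cluster defaults, validate the
    # result and resolve the network name (if any) to its UUID before
    # building the NIC object.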
    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

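    # The remaining argument checks depend on the creation mode: import from
    # a local export, plain creation, or import from a remote cluster.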
    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

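    # Node resource locks start out as a copy of the node locks; both sets
    # are narrowed to the finally selected nodes later, in CheckPrereq.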
    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      self.op.snode = ial.result[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
              d[constants.IDISK_MODE]) for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if (int(ei_version) != constants.EXPORT_VERSION):
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (i.e. override) some instance
    parameters, try to use them from the export information, if it
    declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " the node group of node %s" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
          # to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly, but we have a chicken-and-egg problem here:
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

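    # Build the instance object; it is added to the cluster configuration
    # further down, once its disks have been created or adopted.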
    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
1259
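    # Adopted disks carry pre-existing data and are therefore never wiped;
    # newly created disks are wiped only if the cluster requests it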
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1260
      feedback_fn("* wiping instance disks...")
1261
      try:
1262
        WipeDisks(self, iobj)
1263
      except errors.OpExecError, err:
1264
        logging.exception("Wiping disks failed")
1265
        self.LogWarning("Wiping instance disks failed (%s)", err)
1266
        disk_abort = True
1267

    
1268
    if disk_abort:
1269
      # Something is already wrong with the disks, don't do anything else
1270
      pass
1271
    elif self.op.wait_for_sync:
1272
      disk_abort = not WaitForSync(self, iobj)
1273
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1274
      # make sure the disks are not degraded (still sync-ing is ok)
1275
      feedback_fn("* checking mirrors status")
1276
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1277
    else:
1278
      disk_abort = False
1279

    
1280
    if disk_abort:
1281
      RemoveDisks(self, iobj)
1282
      self.cfg.RemoveInstance(iobj.name)
1283
      # Make sure the instance lock gets removed
1284
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1285
      raise errors.OpExecError("There are some degraded disks for"
1286
                               " this instance")
1287

    
1288
    # instance disks are now active
1289
    iobj.disks_active = True
1290

    
1291
    # Release all node resource locks
1292
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1293

    
1294
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1295
      # we need to set the disks ID to the primary node, since the
1296
      # preceding code might or might not have done it, depending on
1297
      # disk template and other options
1298
      for disk in iobj.disks:
1299
        self.cfg.SetDiskID(disk, pnode_name)
1300
      if self.op.mode == constants.INSTANCE_CREATE:
1301
        if not self.op.no_install:
1302
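          # For mirrored templates the sync is paused while the OS is being
          # installed and resumed afterwards; pause/resume failures are only
          # logged, not treated as fatal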
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1303
                        not self.op.wait_for_sync)
1304
          if pause_sync:
1305
            feedback_fn("* pausing disk sync to install instance OS")
1306
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1307
                                                              (iobj.disks,
1308
                                                               iobj), True)
1309
            for idx, success in enumerate(result.payload):
1310
              if not success:
1311
                logging.warn("pause-sync of instance %s for disk %d failed",
1312
                             instance, idx)
1313

    
1314
          feedback_fn("* running the instance OS create scripts...")
1315
          # FIXME: pass debug option from opcode to backend
1316
          os_add_result = \
1317
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1318
                                          self.op.debug_level)
1319
          if pause_sync:
1320
            feedback_fn("* resuming disk sync")
1321
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1322
                                                              (iobj.disks,
1323
                                                               iobj), False)
1324
            for idx, success in enumerate(result.payload):
1325
              if not success:
1326
                logging.warn("resume-sync of instance %s for disk %d failed",
1327
                             instance, idx)
1328

    
1329
          os_add_result.Raise("Could not add os for instance %s"
1330
                              " on node %s" % (instance, pnode_name))
1331

    
1332
      else:
1333
        if self.op.mode == constants.INSTANCE_IMPORT:
1334
          feedback_fn("* running the instance OS import scripts...")
1335

    
1336
          transfers = []
1337

    
1338
          for idx, image in enumerate(self.src_images):
1339
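            # Disks for which the export provides no image are skipped and
            # keep the empty content they were created with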
            if not image:
1340
              continue
1341

    
1342
            # FIXME: pass debug option from opcode to backend
1343
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1344
                                               constants.IEIO_FILE, (image, ),
1345
                                               constants.IEIO_SCRIPT,
1346
                                               (iobj.disks[idx], idx),
1347
                                               None)
1348
            transfers.append(dt)
1349

    
1350
          import_result = \
1351
            masterd.instance.TransferInstanceData(self, feedback_fn,
1352
                                                  self.op.src_node, pnode_name,
1353
                                                  self.pnode.secondary_ip,
1354
                                                  iobj, transfers)
1355
          if not compat.all(import_result):
1356
            self.LogWarning("Some disks for instance %s on node %s were not"
1357
                            " imported successfully" % (instance, pnode_name))
1358

    
1359
          rename_from = self._old_instance_name
1360

    
1361
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1362
          feedback_fn("* preparing remote import...")
1363
          # The source cluster will stop the instance before attempting to make
1364
          # a connection. In some cases stopping an instance can take a long
1365
          # time, hence the shutdown timeout is added to the connection
1366
          # timeout.
1367
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1368
                             self.op.source_shutdown_timeout)
1369
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1370

    
1371
          assert iobj.primary_node == self.pnode.name
1372
          disk_results = \
1373
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1374
                                          self.source_x509_ca,
1375
                                          self._cds, timeouts)
1376
          if not compat.all(disk_results):
1377
            # TODO: Should the instance still be started, even if some disks
1378
            # failed to import (valid for local imports, too)?
1379
            self.LogWarning("Some disks for instance %s on node %s were not"
1380
                            " imported successfully" % (instance, pnode_name))
1381

    
1382
          rename_from = self.source_instance_name
1383

    
1384
        else:
1385
          # also checked in the prereq part
1386
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1387
                                       % self.op.mode)
1388

    
1389
        # Run rename script on newly imported instance
1390
        assert iobj.name == instance
1391
        feedback_fn("Running rename script for %s" % instance)
1392
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1393
                                                   rename_from,
1394
                                                   self.op.debug_level)
1395
        result.Warn("Failed to run rename script for %s on node %s" %
1396
                    (instance, pnode_name), self.LogWarning)
1397

    
1398
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1399

    
1400
    if self.op.start:
1401
      iobj.admin_state = constants.ADMINST_UP
1402
      self.cfg.Update(iobj, feedback_fn)
1403
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1404
      feedback_fn("* starting instance...")
1405
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1406
                                            False, self.op.reason)
1407
      result.Raise("Could not start instance")
1408

    
1409
    return list(iobj.all_nodes)
1410

    
1411

    
1412
class LUInstanceRename(LogicalUnit):
1413
  """Rename an instance.
1414

1415
  """
1416
  HPATH = "instance-rename"
1417
  HTYPE = constants.HTYPE_INSTANCE
1418

    
1419
  def CheckArguments(self):
1420
    """Check arguments.
1421

1422
    """
1423
    if self.op.ip_check and not self.op.name_check:
1424
      # TODO: make the ip check more flexible and not depend on the name check
1425
      raise errors.OpPrereqError("IP address check requires a name check",
1426
                                 errors.ECODE_INVAL)
1427

    
1428
  def BuildHooksEnv(self):
1429
    """Build hooks env.
1430

1431
    This runs on master, primary and secondary nodes of the instance.
1432

1433
    """
1434
    env = BuildInstanceHookEnvByObject(self, self.instance)
1435
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1436
    return env
1437

    
1438
  def BuildHooksNodes(self):
1439
    """Build hooks nodes.
1440

1441
    """
1442
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1443
    return (nl, nl)
1444

    
1445
  def CheckPrereq(self):
1446
    """Check prerequisites.
1447

1448
    This checks that the instance is in the cluster and is not running.
1449

1450
    """
1451
    self.op.instance_name = ExpandInstanceName(self.cfg,
1452
                                               self.op.instance_name)
1453
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1454
    assert instance is not None
1455
    CheckNodeOnline(self, instance.primary_node)
1456
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1457
                       msg="cannot rename")
1458
    self.instance = instance
1459

    
1460
    new_name = self.op.new_name
1461
    if self.op.name_check:
1462
      hostname = _CheckHostnameSane(self, new_name)
1463
      new_name = self.op.new_name = hostname.name
1464
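      # If the IP belonging to the new name is already answering on the
      # network, refuse the rename to avoid an address clash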
      if (self.op.ip_check and
1465
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1466
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1467
                                   (hostname.ip, new_name),
1468
                                   errors.ECODE_NOTUNIQUE)
1469

    
1470
    instance_list = self.cfg.GetInstanceList()
1471
    if new_name in instance_list and new_name != instance.name:
1472
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1473
                                 new_name, errors.ECODE_EXISTS)
1474

    
1475
  def Exec(self, feedback_fn):
1476
    """Rename the instance.
1477

1478
    """
1479
    inst = self.instance
1480
    old_name = inst.name
1481

    
1482
    rename_file_storage = False
1483
    if (inst.disk_template in constants.DTS_FILEBASED and
1484
        self.op.new_name != inst.name):
1485
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1486
      rename_file_storage = True
1487

    
1488
    self.cfg.RenameInstance(inst.name, self.op.new_name)
1489
    # Change the instance lock. This is definitely safe while we hold the BGL.
1490
    # Otherwise the new lock would have to be added in acquired mode.
1491
    assert self.REQ_BGL
1492
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1493
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1494
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1495

    
1496
    # re-read the instance from the configuration after rename
1497
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
1498

    
1499
    if rename_file_storage:
1500
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1501
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1502
                                                     old_file_storage_dir,
1503
                                                     new_file_storage_dir)
1504
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1505
                   " (but the instance has been renamed in Ganeti)" %
1506
                   (inst.primary_node, old_file_storage_dir,
1507
                    new_file_storage_dir))
1508

    
1509
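    # The disks must be active so the updated instance information can be
    # written to them and the OS rename script can access them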
    StartInstanceDisks(self, inst, None)
1510
    # update info on disks
1511
    info = GetInstanceInfoText(inst)
1512
    for (idx, disk) in enumerate(inst.disks):
1513
      for node in inst.all_nodes:
1514
        self.cfg.SetDiskID(disk, node)
1515
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
1516
        result.Warn("Error setting info on node %s for disk %s" % (node, idx),
1517
                    self.LogWarning)
1518
    try:
1519
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1520
                                                 old_name, self.op.debug_level)
1521
      result.Warn("Could not run OS rename script for instance %s on node %s"
1522
                  " (but the instance has been renamed in Ganeti)" %
1523
                  (inst.name, inst.primary_node), self.LogWarning)
1524
    finally:
1525
      ShutdownInstanceDisks(self, inst)
1526

    
1527
    return inst.name
1528

    
1529

    
1530
class LUInstanceRemove(LogicalUnit):
1531
  """Remove an instance.
1532

1533
  """
1534
  HPATH = "instance-remove"
1535
  HTYPE = constants.HTYPE_INSTANCE
1536
  REQ_BGL = False
1537

    
1538
  def ExpandNames(self):
1539
    self._ExpandAndLockInstance()
1540
    self.needed_locks[locking.LEVEL_NODE] = []
1541
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1542
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1543

    
1544
  def DeclareLocks(self, level):
1545
    if level == locking.LEVEL_NODE:
1546
      self._LockInstancesNodes()
1547
    elif level == locking.LEVEL_NODE_RES:
1548
      # Copy node locks
1549
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1550
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1551

    
1552
  def BuildHooksEnv(self):
1553
    """Build hooks env.
1554

1555
    This runs on master, primary and secondary nodes of the instance.
1556

1557
    """
1558
    env = BuildInstanceHookEnvByObject(self, self.instance)
1559
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1560
    return env
1561

    
1562
  def BuildHooksNodes(self):
1563
    """Build hooks nodes.
1564

1565
    """
1566
    nl = [self.cfg.GetMasterNode()]
1567
    nl_post = list(self.instance.all_nodes) + nl
1568
    return (nl, nl_post)
1569

    
1570
  def CheckPrereq(self):
1571
    """Check prerequisites.
1572

1573
    This checks that the instance is in the cluster.
1574

1575
    """
1576
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1577
    assert self.instance is not None, \
1578
      "Cannot retrieve locked instance %s" % self.op.instance_name
1579

    
1580
  def Exec(self, feedback_fn):
1581
    """Remove the instance.
1582

1583
    """
1584
    instance = self.instance
1585
    logging.info("Shutting down instance %s on node %s",
1586
                 instance.name, instance.primary_node)
1587

    
1588
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1589
                                             self.op.shutdown_timeout,
1590
                                             self.op.reason)
1591
    if self.op.ignore_failures:
1592
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1593
    else:
1594
      result.Raise("Could not shutdown instance %s on node %s" %
1595
                   (instance.name, instance.primary_node))
1596

    
1597
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1598
            self.owned_locks(locking.LEVEL_NODE_RES))
1599
    assert not (set(instance.all_nodes) -
1600
                self.owned_locks(locking.LEVEL_NODE)), \
1601
      "Not owning correct locks"
1602

    
1603
    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1604

    
1605

    
1606
class LUInstanceMove(LogicalUnit):
1607
  """Move an instance by data-copying.
1608

1609
  """
1610
  HPATH = "instance-move"
1611
  HTYPE = constants.HTYPE_INSTANCE
1612
  REQ_BGL = False
1613

    
1614
  def ExpandNames(self):
1615
    self._ExpandAndLockInstance()
1616
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
1617
    self.op.target_node = target_node
1618
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
1619
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1620
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1621

    
1622
  def DeclareLocks(self, level):
1623
    if level == locking.LEVEL_NODE:
1624
      self._LockInstancesNodes(primary_only=True)
1625
    elif level == locking.LEVEL_NODE_RES:
1626
      # Copy node locks
1627
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1628
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1629

    
1630
  def BuildHooksEnv(self):
1631
    """Build hooks env.
1632

1633
    This runs on master, primary and secondary nodes of the instance.
1634

1635
    """
1636
    env = {
1637
      "TARGET_NODE": self.op.target_node,
1638
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1639
      }
1640
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1641
    return env
1642

    
1643
  def BuildHooksNodes(self):
1644
    """Build hooks nodes.
1645

1646
    """
1647
    nl = [
1648
      self.cfg.GetMasterNode(),
1649
      self.instance.primary_node,
1650
      self.op.target_node,
1651
      ]
1652
    return (nl, nl)
1653

    
1654
  def CheckPrereq(self):
1655
    """Check prerequisites.
1656

1657
    This checks that the instance is in the cluster.
1658

1659
    """
1660
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1661
    assert self.instance is not None, \
1662
      "Cannot retrieve locked instance %s" % self.op.instance_name
1663

    
1664
    if instance.disk_template not in constants.DTS_COPYABLE:
1665
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1666
                                 instance.disk_template, errors.ECODE_STATE)
1667

    
1668
    node = self.cfg.GetNodeInfo(self.op.target_node)
1669
    assert node is not None, \
1670
      "Cannot retrieve locked node %s" % self.op.target_node
1671

    
1672
    self.target_node = target_node = node.name
1673

    
1674
    if target_node == instance.primary_node:
1675
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1676
                                 (instance.name, target_node),
1677
                                 errors.ECODE_STATE)
1678

    
1679
    bep = self.cfg.GetClusterInfo().FillBE(instance)
1680

    
1681
    for idx, dsk in enumerate(instance.disks):
1682
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1683
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1684
                                   " cannot copy" % idx, errors.ECODE_STATE)
1685

    
1686
    CheckNodeOnline(self, target_node)
1687
    CheckNodeNotDrained(self, target_node)
1688
    CheckNodeVmCapable(self, target_node)
1689
    cluster = self.cfg.GetClusterInfo()
1690
    group_info = self.cfg.GetNodeGroup(node.group)
1691
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1692
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1693
                           ignore=self.op.ignore_ipolicy)
1694

    
1695
    if instance.admin_state == constants.ADMINST_UP:
1696
      # check memory requirements on the target node
1697
      CheckNodeFreeMemory(
1698
          self, target_node, "failing over instance %s" %
1699
          instance.name, bep[constants.BE_MAXMEM], instance.hypervisor,
1700
          self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
1701
    else:
1702
      self.LogInfo("Not checking memory on the secondary node as"
1703
                   " instance will not be started")
1704

    
1705
    # check bridge existence
1706
    CheckInstanceBridgesExist(self, instance, node=target_node)
1707

    
1708
  def Exec(self, feedback_fn):
1709
    """Move an instance.
1710

1711
    The move is done by shutting it down on its present node, copying
1712
    the data over (slow) and starting it on the new node.
1713

1714
    """
1715
    instance = self.instance
1716

    
1717
    source_node = instance.primary_node
1718
    target_node = self.target_node
1719

    
1720
    self.LogInfo("Shutting down instance %s on source node %s",
1721
                 instance.name, source_node)
1722

    
1723
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1724
            self.owned_locks(locking.LEVEL_NODE_RES))
1725

    
1726
    result = self.rpc.call_instance_shutdown(source_node, instance,
1727
                                             self.op.shutdown_timeout,
1728
                                             self.op.reason)
1729
    if self.op.ignore_consistency:
1730
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1731
                  " anyway. Please make sure node %s is down. Error details" %
1732
                  (instance.name, source_node, source_node), self.LogWarning)
1733
    else:
1734
      result.Raise("Could not shutdown instance %s on node %s" %
1735
                   (instance.name, source_node))
1736

    
1737
    # create the target disks
1738
    try:
1739
      CreateDisks(self, instance, target_node=target_node)
1740
    except errors.OpExecError:
1741
      self.LogWarning("Device creation failed")
1742
      self.cfg.ReleaseDRBDMinors(instance.name)
1743
      raise
1744

    
1745
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1746

    
1747
    errs = []
1748
    # activate, get path, copy the data over
1749
    for idx, disk in enumerate(instance.disks):
1750
      self.LogInfo("Copying data for disk %d", idx)
1751
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1752
                                               instance.name, True, idx)
1753
      if result.fail_msg:
1754
        self.LogWarning("Can't assemble newly created disk %d: %s",
1755
                        idx, result.fail_msg)
1756
        errs.append(result.fail_msg)
1757
        break
1758
      dev_path = result.payload
1759
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1760
                                             target_node, dev_path,
1761
                                             cluster_name)
1762
      if result.fail_msg:
1763
        self.LogWarning("Can't copy data over for disk %d: %s",
1764
                        idx, result.fail_msg)
1765
        errs.append(result.fail_msg)
1766
        break
1767

    
1768
    if errs:
1769
      self.LogWarning("Some disks failed to copy, aborting")
1770
      try:
1771
        RemoveDisks(self, instance, target_node=target_node)
1772
      finally:
1773
        self.cfg.ReleaseDRBDMinors(instance.name)
1774
        raise errors.OpExecError("Errors during disk copy: %s" %
1775
                                 (",".join(errs),))
1776

    
1777
    instance.primary_node = target_node
1778
    self.cfg.Update(instance, feedback_fn)
1779

    
1780
    self.LogInfo("Removing the disks on the original node")
1781
    RemoveDisks(self, instance, target_node=source_node)
1782

    
1783
    # Only start the instance if it's marked as up
1784
    if instance.admin_state == constants.ADMINST_UP:
1785
      self.LogInfo("Starting instance %s on node %s",
1786
                   instance.name, target_node)
1787

    
1788
      disks_ok, _ = AssembleInstanceDisks(self, instance,
1789
                                          ignore_secondaries=True)
1790
      if not disks_ok:
1791
        ShutdownInstanceDisks(self, instance)
1792
        raise errors.OpExecError("Can't activate the instance's disks")
1793

    
1794
      result = self.rpc.call_instance_start(target_node,
1795
                                            (instance, None, None), False,
1796
                                            self.op.reason)
1797
      msg = result.fail_msg
1798
      if msg:
1799
        ShutdownInstanceDisks(self, instance)
1800
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1801
                                 (instance.name, target_node, msg))
1802

    
1803

    
1804
class LUInstanceMultiAlloc(NoHooksLU):
1805
  """Allocates multiple instances at the same time.
1806

1807
  """
1808
  REQ_BGL = False
1809

    
1810
  def CheckArguments(self):
1811
    """Check arguments.
1812

1813
    """
1814
    nodes = []
1815
    for inst in self.op.instances:
1816
      if inst.iallocator is not None:
1817
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1818
                                   " instance objects", errors.ECODE_INVAL)
1819
      nodes.append(bool(inst.pnode))
1820
      if inst.disk_template in constants.DTS_INT_MIRROR:
1821
        nodes.append(bool(inst.snode))
1822

    
1823
    has_nodes = compat.any(nodes)
1824
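    # all(...) ^ any(...) is true exactly when only some of the instances
    # specify nodes; either all of them or none of them must do so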
    if compat.all(nodes) ^ has_nodes:
1825
      raise errors.OpPrereqError("There are instance objects providing"
1826
                                 " pnode/snode while others do not",
1827
                                 errors.ECODE_INVAL)
1828

    
1829
    if self.op.iallocator is None:
1830
      default_iallocator = self.cfg.GetDefaultIAllocator()
1831
      if default_iallocator and has_nodes:
1832
        self.op.iallocator = default_iallocator
1833
      else:
1834
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1835
                                   " given and no cluster-wide default"
1836
                                   " iallocator found; please specify either"
1837
                                   " an iallocator or nodes on the instances"
1838
                                   " or set a cluster-wide default iallocator",
1839
                                   errors.ECODE_INVAL)
1840

    
1841
    _CheckOpportunisticLocking(self.op)
1842

    
1843
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1844
    if dups:
1845
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1846
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1847

    
1848
  def ExpandNames(self):
1849
    """Calculate the locks.
1850

1851
    """
1852
    self.share_locks = ShareAll()
1853
    self.needed_locks = {
1854
      # iallocator will select nodes and even if no iallocator is used,
1855
      # collisions with LUInstanceCreate should be avoided
1856
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1857
      }
1858

    
1859
    if self.op.iallocator:
1860
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1861
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1862

    
1863
      if self.op.opportunistic_locking:
1864
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1865
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1866
    else:
1867
      nodeslist = []
1868
      for inst in self.op.instances:
1869
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1870
        nodeslist.append(inst.pnode)
1871
        if inst.snode is not None:
1872
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
1873
          nodeslist.append(inst.snode)
1874

    
1875
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1876
      # Lock resources of instance's primary and secondary nodes (copy to
1877
      # prevent accidental modification)
1878
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1879

    
1880
  def CheckPrereq(self):
1881
    """Check prerequisite.
1882

1883
    """
1884
    cluster = self.cfg.GetClusterInfo()
1885
    default_vg = self.cfg.GetVGName()
1886
    ec_id = self.proc.GetECId()
1887

    
1888
    if self.op.opportunistic_locking:
1889
      # Only consider nodes for which a lock is held
1890
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1891
    else:
1892
      node_whitelist = None
1893

    
1894
    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1895
                                         _ComputeNics(op, cluster, None,
1896
                                                      self.cfg, ec_id),
1897
                                         _ComputeFullBeParams(op, cluster),
1898
                                         node_whitelist)
1899
             for op in self.op.instances]
1900

    
1901
    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1902
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1903

    
1904
    ial.Run(self.op.iallocator)
1905

    
1906
    if not ial.success:
1907
      raise errors.OpPrereqError("Can't compute nodes using"
1908
                                 " iallocator '%s': %s" %
1909
                                 (self.op.iallocator, ial.info),
1910
                                 errors.ECODE_NORES)
1911

    
1912
    self.ia_result = ial.result
1913

    
1914
    if self.op.dry_run:
1915
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1916
        constants.JOB_IDS_KEY: [],
1917
        })
1918

    
1919
  def _ConstructPartialResult(self):
1920
    """Contructs the partial result.
1921

1922
    """
1923
    (allocatable, failed) = self.ia_result
1924
    return {
1925
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1926
        map(compat.fst, allocatable),
1927
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1928
      }
1929

    
1930
  def Exec(self, feedback_fn):
1931
    """Executes the opcode.
1932

1933
    """
1934
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
1935
    (allocatable, failed) = self.ia_result
1936

    
1937
    jobs = []
1938
    for (name, nodes) in allocatable:
1939
      op = op2inst.pop(name)
1940

    
1941
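      # For mirrored disk templates the allocator returns a (primary,
      # secondary) pair, otherwise a single primary node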
      if len(nodes) > 1:
1942
        (op.pnode, op.snode) = nodes
1943
      else:
1944
        (op.pnode,) = nodes
1945

    
1946
      jobs.append([op])
1947

    
1948
    missing = set(op2inst.keys()) - set(failed)
1949
    assert not missing, \
1950
      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
1951

    
1952
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
1953

    
1954

    
1955
class _InstNicModPrivate:
1956
  """Data structure for network interface modifications.
1957

1958
  Used by L{LUInstanceSetParams}.
1959

1960
  """
1961
  def __init__(self):
1962
    self.params = None
1963
    self.filled = None
1964

    
1965

    
1966
def _PrepareContainerMods(mods, private_fn):
1967
  """Prepares a list of container modifications by adding a private data field.
1968

1969
  @type mods: list of tuples; (operation, index, parameters)
1970
  @param mods: List of modifications
1971
  @type private_fn: callable or None
1972
  @param private_fn: Callable for constructing a private data field for a
1973
    modification
1974
  @rtype: list
1975

1976
  """
1977
  if private_fn is None:
1978
    fn = lambda: None
1979
  else:
1980
    fn = private_fn
1981

    
1982
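  # Attach a freshly built private data object to every modification so the
  # callbacks in L{_ApplyContainerMods} can share per-item state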
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
1983

    
1984

    
1985
def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_specs):
1986
  """Checks if nodes have enough physical CPUs
1987

1988
  This function checks if all given nodes have the needed number of
1989
  physical CPUs. In case any node has less CPUs or we cannot get the
1990
  information from the node, this function raises an OpPrereqError
1991
  exception.
1992

1993
  @type lu: C{LogicalUnit}
1994
  @param lu: a logical unit from which we get configuration data
1995
  @type nodenames: C{list}
1996
  @param nodenames: the list of node names to check
1997
  @type requested: C{int}
1998
  @param requested: the minimum acceptable number of physical CPUs
1999
  @type hypervisor_specs: list of pairs (string, dict of strings)
2000
  @param hypervisor_specs: list of hypervisor specifications in
2001
      pairs (hypervisor_name, hvparams)
2002
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2003
      or we cannot check the node
2004

2005
  """
2006
  nodeinfo = lu.rpc.call_node_info(nodenames, None, hypervisor_specs, None)
2007
  for node in nodenames:
2008
    info = nodeinfo[node]
2009
    info.Raise("Cannot get current information from node %s" % node,
2010
               prereq=True, ecode=errors.ECODE_ENVIRON)
2011
    (_, _, (hv_info, )) = info.payload
2012
    num_cpus = hv_info.get("cpu_total", None)
2013
    if not isinstance(num_cpus, int):
2014
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2015
                                 " on node %s, result was '%s'" %
2016
                                 (node, num_cpus), errors.ECODE_ENVIRON)
2017
    if requested > num_cpus:
2018
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2019
                                 "required" % (node, num_cpus, requested),
2020
                                 errors.ECODE_NORES)
2021

    
2022

    
2023
def GetItemFromContainer(identifier, kind, container):
2024
  """Return the item refered by the identifier.
2025

2026
  @type identifier: string
2027
  @param identifier: Item index or name or UUID
2028
  @type kind: string
2029
  @param kind: One-word item description
2030
  @type container: list
2031
  @param container: Container to get the item from
2032

2033
  """
2034
  # Index
2035
  try:
2036
    idx = int(identifier)
2037
    if idx == -1:
2038
      # Append
2039
      absidx = len(container) - 1
2040
    elif idx < 0:
2041
      raise IndexError("Not accepting negative indices other than -1")
2042
    elif idx > len(container):
2043
      raise IndexError("Got %s index %s, but there are only %s" %
2044
                       (kind, idx, len(container)))
2045
    else:
2046
      absidx = idx
2047
    return (absidx, container[idx])
2048
  except ValueError:
2049
    pass
2050

    
2051
  for idx, item in enumerate(container):
2052
    if item.uuid == identifier or item.name == identifier:
2053
      return (idx, item)
2054

    
2055
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2056
                             (kind, identifier), errors.ECODE_NOENT)
2057

    
2058

    
2059
def _ApplyContainerMods(kind, container, chgdesc, mods,
2060
                        create_fn, modify_fn, remove_fn):
2061
  """Applies descriptions in C{mods} to C{container}.
2062

2063
  @type kind: string
2064
  @param kind: One-word item description
2065
  @type container: list
2066
  @param container: Container to modify
2067
  @type chgdesc: None or list
2068
  @param chgdesc: List of applied changes
2069
  @type mods: list
2070
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2071
  @type create_fn: callable
2072
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2073
    receives absolute item index, parameters and private data object as added
2074
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2075
    as list
2076
  @type modify_fn: callable
2077
  @param modify_fn: Callback for modifying an existing item
2078
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2079
    and private data object as added by L{_PrepareContainerMods}, returns
2080
    changes as list
2081
  @type remove_fn: callable
2082
  @param remove_fn: Callback on removing item; receives absolute item index,
2083
    item and private data object as added by L{_PrepareContainerMods}
2084

2085
  """
2086
  for (op, identifier, params, private) in mods:
2087
    changes = None
2088

    
2089
    if op == constants.DDM_ADD:
2090
      # Calculate where item will be added
2091
      # When adding an item, identifier can only be an index
2092
      try:
2093
        idx = int(identifier)
2094
      except ValueError:
2095
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2096
                                   " identifier for %s" % constants.DDM_ADD,
2097
                                   errors.ECODE_INVAL)
2098
      if idx == -1:
2099
        addidx = len(container)
2100
      else:
2101
        if idx < 0:
2102
          raise IndexError("Not accepting negative indices other than -1")
2103
        elif idx > len(container):
2104
          raise IndexError("Got %s index %s, but there are only %s" %
2105
                           (kind, idx, len(container)))
2106
        addidx = idx
2107

    
2108
      if create_fn is None:
2109
        item = params
2110
      else:
2111
        (item, changes) = create_fn(addidx, params, private)
2112

    
2113
      if idx == -1:
2114
        container.append(item)
2115
      else:
2116
        assert idx >= 0
2117
        assert idx <= len(container)
2118
        # list.insert does so before the specified index
2119
        container.insert(idx, item)
2120
    else:
2121
      # Retrieve existing item
2122
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2123

    
2124
      if op == constants.DDM_REMOVE:
2125
        assert not params
2126

    
2127
        if remove_fn is not None:
2128
          remove_fn(absidx, item, private)
2129

    
2130
        changes = [("%s/%s" % (kind, absidx), "remove")]
2131

    
2132
        assert container[absidx] == item
2133
        del container[absidx]
2134
      elif op == constants.DDM_MODIFY:
2135
        if modify_fn is not None:
2136
          changes = modify_fn(absidx, item, params, private)
2137
      else:
2138
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2139

    
2140
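    # The callbacks must return either None or a list of (description,
    # value) pairs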
    assert _TApplyContModsCbChanges(changes)
2141

    
2142
    if not (chgdesc is None or changes is None):
2143
      chgdesc.extend(changes)
2144

    
2145

    
2146
def _UpdateIvNames(base_index, disks):
2147
  """Updates the C{iv_name} attribute of disks.
2148

2149
  @type disks: list of L{objects.Disk}
2150

2151
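  With C{base_index} 2, for instance, the disks get the names C{disk/2},
  C{disk/3} and so on.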
  """
2152
  for (idx, disk) in enumerate(disks):
2153
    disk.iv_name = "disk/%s" % (base_index + idx, )
2154

    
2155

    
2156
class LUInstanceSetParams(LogicalUnit):
2157
  """Modifies an instances's parameters.
2158

2159
  """
2160
  HPATH = "instance-modify"
2161
  HTYPE = constants.HTYPE_INSTANCE
2162
  REQ_BGL = False
2163

    
2164
  @staticmethod
2165
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2166
    assert ht.TList(mods)
2167
    assert not mods or len(mods[0]) in (2, 3)
2168

    
2169
    if mods and len(mods[0]) == 2:
2170
      result = []
2171

    
2172
      addremove = 0
2173
      for op, params in mods:
2174
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2175
          result.append((op, -1, params))
2176
          addremove += 1
2177

    
2178
          if addremove > 1:
2179
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2180
                                       " supported at a time" % kind,
2181
                                       errors.ECODE_INVAL)
2182
        else:
2183
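          # In the old two-element format anything that is not an explicit
          # add/remove uses the first element as the item identifier and is
          # treated as a modification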
          result.append((constants.DDM_MODIFY, op, params))
2184

    
2185
      assert verify_fn(result)
2186
    else:
2187
      result = mods
2188

    
2189
    return result
2190

    
2191
  @staticmethod
2192
  def _CheckMods(kind, mods, key_types, item_fn):
2193
    """Ensures requested disk/NIC modifications are valid.
2194

2195
    """
2196
    for (op, _, params) in mods:
2197
      assert ht.TDict(params)
2198

    
2199
      # If 'key_types' is an empty dict, we assume we have an
2200
      # 'ext' template and thus do not ForceDictType
2201
      if key_types:
2202
        utils.ForceDictType(params, key_types)
2203

    
2204
      if op == constants.DDM_REMOVE:
2205
        if params:
2206
          raise errors.OpPrereqError("No settings should be passed when"
2207
                                     " removing a %s" % kind,
2208
                                     errors.ECODE_INVAL)
2209
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2210
        item_fn(op, params)
2211
      else:
2212
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2213

    
2214
  @staticmethod
2215
  def _VerifyDiskModification(op, params, excl_stor):
2216
    """Verifies a disk modification.
2217

2218
    """
2219
    if op == constants.DDM_ADD:
2220
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2221
      if mode not in constants.DISK_ACCESS_SET:
2222
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2223
                                   errors.ECODE_INVAL)
2224

    
2225
      size = params.get(constants.IDISK_SIZE, None)
2226
      if size is None:
2227
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2228
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2229

    
2230
      try:
2231
        size = int(size)
2232
      except (TypeError, ValueError), err:
2233
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2234
                                   errors.ECODE_INVAL)
2235

    
2236
      params[constants.IDISK_SIZE] = size
2237
      name = params.get(constants.IDISK_NAME, None)
2238
      if name is not None and name.lower() == constants.VALUE_NONE:
2239
        params[constants.IDISK_NAME] = None
2240

    
2241
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2242

    
2243
    elif op == constants.DDM_MODIFY:
2244
      if constants.IDISK_SIZE in params:
2245
        raise errors.OpPrereqError("Disk size change not possible, use"
2246
                                   " grow-disk", errors.ECODE_INVAL)
2247
      if len(params) > 2:
2248
        raise errors.OpPrereqError("Disk modification doesn't support"
2249
                                   " additional arbitrary parameters",
2250
                                   errors.ECODE_INVAL)
2251
      name = params.get(constants.IDISK_NAME, None)
2252
      if name is not None and name.lower() == constants.VALUE_NONE:
2253
        params[constants.IDISK_NAME] = None
2254

    
2255
  @staticmethod
2256
  def _VerifyNicModification(op, params):
2257
    """Verifies a network interface modification.
2258

2259
    """
2260
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2261
      ip = params.get(constants.INIC_IP, None)
2262
      name = params.get(constants.INIC_NAME, None)
2263
      req_net = params.get(constants.INIC_NETWORK, None)
2264
      link = params.get(constants.NIC_LINK, None)
2265
      mode = params.get(constants.NIC_MODE, None)
2266
      if name is not None and name.lower() == constants.VALUE_NONE:
2267
        params[constants.INIC_NAME] = None
2268
      if req_net is not None:
2269
        if req_net.lower() == constants.VALUE_NONE:
2270
          params[constants.INIC_NETWORK] = None
2271
          req_net = None
2272
        elif link is not None or mode is not None:
2273
          raise errors.OpPrereqError("If network is given"
2274
                                     " mode or link should not",
2275
                                     errors.ECODE_INVAL)
2276

    
2277
      if op == constants.DDM_ADD:
2278
        macaddr = params.get(constants.INIC_MAC, None)
2279
        if macaddr is None:
2280
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2281

    
2282
      if ip is not None:
2283
        if ip.lower() == constants.VALUE_NONE:
2284
          params[constants.INIC_IP] = None
2285
        else:
2286
          if ip.lower() == constants.NIC_IP_POOL:
2287
            if op == constants.DDM_ADD and req_net is None:
2288
              raise errors.OpPrereqError("If ip=pool, parameter network"
2289
                                         " cannot be none",
2290
                                         errors.ECODE_INVAL)
2291
          else:
2292
            if not netutils.IPAddress.IsValid(ip):
2293
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2294
                                         errors.ECODE_INVAL)
2295

    
2296
      if constants.INIC_MAC in params:
2297
        macaddr = params[constants.INIC_MAC]
2298
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2299
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2300

    
2301
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2302
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2303
                                     " modifying an existing NIC",
2304
                                     errors.ECODE_INVAL)
2305

    
2306
  def CheckArguments(self):
2307
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2308
            self.op.hvparams or self.op.beparams or self.op.os_name or
2309
            self.op.offline is not None or self.op.runtime_mem or
2310
            self.op.pnode):
2311
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2312

    
2313
    if self.op.hvparams:
2314
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2315
                           "hypervisor", "instance", "cluster")
2316

    
2317
    self.op.disks = self._UpgradeDiskNicMods(
2318
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2319
    self.op.nics = self._UpgradeDiskNicMods(
2320
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2321

    
2322
    if self.op.disks and self.op.disk_template is not None:
2323
      raise errors.OpPrereqError("Disk template conversion and other disk"
2324
                                 " changes not supported at the same time",
2325
                                 errors.ECODE_INVAL)
2326

    
2327
    if (self.op.disk_template and
2328
        self.op.disk_template in constants.DTS_INT_MIRROR and
2329
        self.op.remote_node is None):
2330
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2331
                                 " one requires specifying a secondary node",
2332
                                 errors.ECODE_INVAL)
2333

    
2334
    # Check NIC modifications
2335
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2336
                    self._VerifyNicModification)
2337

    
2338
    if self.op.pnode:
2339
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2340

    
2341
  def ExpandNames(self):
2342
    self._ExpandAndLockInstance()
2343
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2344
    # Can't even acquire node locks in shared mode as upcoming changes in
2345
    # Ganeti 2.6 will start to modify the node object on disk conversion
2346
    self.needed_locks[locking.LEVEL_NODE] = []
2347
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2348
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2349
    # Lock the node group (shared) in order to look up the ipolicy
2350
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2351

    
2352
  def DeclareLocks(self, level):
2353
    if level == locking.LEVEL_NODEGROUP:
2354
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2355
      # Acquire locks for the instance's nodegroups optimistically. Needs
2356
      # to be verified in CheckPrereq
2357
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2358
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2359
    elif level == locking.LEVEL_NODE:
2360
      self._LockInstancesNodes()
2361
      if self.op.disk_template and self.op.remote_node:
2362
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2363
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2364
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2365
      # Copy node locks
2366
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2367
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2368

    
2369
  def BuildHooksEnv(self):
2370
    """Build hooks env.
2371

2372
    This runs on the master, primary and secondaries.
2373

2374
    """
2375
    args = {}
2376
    if constants.BE_MINMEM in self.be_new:
2377
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2378
    if constants.BE_MAXMEM in self.be_new:
2379
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2380
    if constants.BE_VCPUS in self.be_new:
2381
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2382
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2383
    # information at all.
2384

    
2385
    if self._new_nics is not None:
2386
      nics = []
2387

    
2388
      for nic in self._new_nics:
2389
        n = copy.deepcopy(nic)
2390
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2391
        n.nicparams = nicparams
2392
        nics.append(NICToTuple(self, n))
2393

    
2394
      args["nics"] = nics
2395

    
2396
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2397
    if self.op.disk_template:
2398
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2399
    if self.op.runtime_mem:
2400
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2401

    
2402
    return env
2403

    
2404
  def BuildHooksNodes(self):
2405
    """Build hooks nodes.
2406

2407
    """
2408
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2409
    return (nl, nl)
2410

    
2411
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2412
                              old_params, cluster, pnode):
2413

    
2414
    update_params_dict = dict([(key, params[key])
2415
                               for key in constants.NICS_PARAMETERS
2416
                               if key in params])
2417

    
2418
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2419
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2420

    
2421
    new_net_uuid = None
2422
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2423
    if new_net_uuid_or_name:
2424
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2425
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2426

    
2427
    if old_net_uuid:
2428
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2429

    
2430
    if new_net_uuid:
2431
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2432
      if not netparams:
2433
        raise errors.OpPrereqError("No netparams found for the network"
2434
                                   " %s, probably not connected" %
2435
                                   new_net_obj.name, errors.ECODE_INVAL)
2436
      new_params = dict(netparams)
2437
    else:
2438
      new_params = GetUpdatedParams(old_params, update_params_dict)
2439

    
2440
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2441

    
2442
    new_filled_params = cluster.SimpleFillNIC(new_params)
2443
    objects.NIC.CheckParameterSyntax(new_filled_params)
2444

    
2445
    new_mode = new_filled_params[constants.NIC_MODE]
2446
    if new_mode == constants.NIC_MODE_BRIDGED:
2447
      bridge = new_filled_params[constants.NIC_LINK]
2448
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2449
      if msg:
2450
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2451
        if self.op.force:
2452
          self.warn.append(msg)
2453
        else:
2454
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2455

    
2456
    elif new_mode == constants.NIC_MODE_ROUTED:
2457
      ip = params.get(constants.INIC_IP, old_ip)
2458
      if ip is None:
2459
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2460
                                   " on a routed NIC", errors.ECODE_INVAL)
2461

    
2462
    elif new_mode == constants.NIC_MODE_OVS:
2463
      # TODO: check OVS link
2464
      self.LogInfo("OVS links are currently not checked for correctness")
2465

    
2466
    if constants.INIC_MAC in params:
2467
      mac = params[constants.INIC_MAC]
2468
      if mac is None:
2469
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2470
                                   errors.ECODE_INVAL)
2471
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2472
        # otherwise generate the MAC address
2473
        params[constants.INIC_MAC] = \
2474
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2475
      else:
2476
        # or validate/reserve the current one
2477
        try:
2478
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2479
        except errors.ReservationError:
2480
          raise errors.OpPrereqError("MAC address '%s' already in use"
2481
                                     " in cluster" % mac,
2482
                                     errors.ECODE_NOTUNIQUE)
2483
    elif new_net_uuid != old_net_uuid:
2484

    
2485
      def get_net_prefix(net_uuid):
2486
        mac_prefix = None
2487
        if net_uuid:
2488
          nobj = self.cfg.GetNetwork(net_uuid)
2489
          mac_prefix = nobj.mac_prefix
2490

    
2491
        return mac_prefix
2492

    
2493
      new_prefix = get_net_prefix(new_net_uuid)
2494
      old_prefix = get_net_prefix(old_net_uuid)
2495
      if old_prefix != new_prefix:
2496
        params[constants.INIC_MAC] = \
2497
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2498

    
2499
    # if there is a change in (ip, network) tuple
2500
    new_ip = params.get(constants.INIC_IP, old_ip)
2501
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2502
      if new_ip:
2503
        # if IP is pool then require a network and generate one IP
2504
        if new_ip.lower() == constants.NIC_IP_POOL:
2505
          if new_net_uuid:
2506
            try:
2507
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2508
            except errors.ReservationError:
2509
              raise errors.OpPrereqError("Unable to get a free IP"
2510
                                         " from the address pool",
2511
                                         errors.ECODE_STATE)
2512
            self.LogInfo("Chose IP %s from network %s",
2513
                         new_ip,
2514
                         new_net_obj.name)
2515
            params[constants.INIC_IP] = new_ip
2516
          else:
2517
            raise errors.OpPrereqError("ip=pool, but no network found",
2518
                                       errors.ECODE_INVAL)
2519
        # Reserve the new IP in the new network, if any
2520
        elif new_net_uuid:
2521
          try:
2522
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2523
            self.LogInfo("Reserving IP %s in network %s",
2524
                         new_ip, new_net_obj.name)
2525
          except errors.ReservationError:
2526
            raise errors.OpPrereqError("IP %s not available in network %s" %
2527
                                       (new_ip, new_net_obj.name),
2528
                                       errors.ECODE_NOTUNIQUE)
2529
        # new network is None so check if new IP is a conflicting IP
2530
        elif self.op.conflicts_check:
2531
          _CheckForConflictingIp(self, new_ip, pnode)
2532

    
2533
      # release old IP if old network is not None
2534
      if old_ip and old_net_uuid:
2535
        try:
2536
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2537
        except errors.AddressPoolError:
2538
          logging.warning("Release IP %s not contained in network %s",
2539
                          old_ip, old_net_obj.name)
2540

    
2541
    # there are no changes in (ip, network) tuple and old network is not None
2542
    elif (old_net_uuid is not None and
2543
          (req_link is not None or req_mode is not None)):
2544
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2545
                                 " a NIC that is connected to a network",
2546
                                 errors.ECODE_INVAL)
2547

    
2548
    private.params = new_params
2549
    private.filled = new_filled_params
2550

    
2551
  def _PreCheckDiskTemplate(self, pnode_info):
2552
    """CheckPrereq checks related to a new disk template."""
2553
    # Arguments are passed to avoid configuration lookups
2554
    instance = self.instance
2555
    pnode = instance.primary_node
2556
    cluster = self.cluster
2557
    if instance.disk_template == self.op.disk_template:
2558
      raise errors.OpPrereqError("Instance already has disk template %s" %
2559
                                 instance.disk_template, errors.ECODE_INVAL)
2560

    
2561
    if (instance.disk_template,
2562
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2563
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2564
                                 " %s to %s" % (instance.disk_template,
2565
                                                self.op.disk_template),
2566
                                 errors.ECODE_INVAL)
2567
    CheckInstanceState(self, instance, INSTANCE_DOWN,
2568
                       msg="cannot change disk template")
2569
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2570
      if self.op.remote_node == pnode:
2571
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2572
                                   " as the primary node of the instance" %
2573
                                   self.op.remote_node, errors.ECODE_STATE)
2574
      CheckNodeOnline(self, self.op.remote_node)
2575
      CheckNodeNotDrained(self, self.op.remote_node)
2576
      # FIXME: here we assume that the old instance type is DT_PLAIN
2577
      assert instance.disk_template == constants.DT_PLAIN
2578
      disks = [{constants.IDISK_SIZE: d.size,
2579
                constants.IDISK_VG: d.logical_id[0]}
2580
               for d in instance.disks]
2581
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2582
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2583

    
2584
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2585
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2586
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2587
                                                              snode_group)
2588
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2589
                             ignore=self.op.ignore_ipolicy)
2590
      if pnode_info.group != snode_info.group:
2591
        self.LogWarning("The primary and secondary nodes are in two"
2592
                        " different node groups; the disk parameters"
2593
                        " from the first disk's node group will be"
2594
                        " used")
2595

    
2596
    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
2597
      # Make sure none of the nodes require exclusive storage
2598
      nodes = [pnode_info]
2599
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2600
        assert snode_info
2601
        nodes.append(snode_info)
2602
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2603
      if compat.any(map(has_es, nodes)):
2604
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2605
                  " storage is enabled" % (instance.disk_template,
2606
                                           self.op.disk_template))
2607
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2608

    
2609
  def _PreCheckDisks(self, ispec):
2610
    """CheckPrereq checks related to disk changes.
2611

2612
    @type ispec: dict
2613
    @param ispec: instance specs to be updated with the new disks
2614

2615
    """
2616
    instance = self.instance
2617
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2618

    
2619
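    # Disk modifications are verified against the stricter exclusive-storage
    # rules if exclusive storage is enabled on any of the instance's nodes.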
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodeNames(self.cfg, instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode = instance.primary_node

    self.warn = []

    if (self.op.pnode is not None and self.op.pnode != pnode and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
                                   errors.ECODE_STATE)

    assert pnode in self.owned_locks(locking.LEVEL_NODE)
    nodelist = list(instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
                                              instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
    be_old = cluster.FillBE(instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(instance.hypervisor,
                    self.cfg.GetClusterInfo().hvparams[instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

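    # If the maximum memory is being raised, make sure the primary node (and,
    # with auto_balance enabled, every secondary node) still has enough free
    # memory for the instance to (re)start or fail over after the change.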
    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode, instance.name, instance.hypervisor,
          cluster.hvparams[instance.hypervisor])
      hvspecs = [(instance.hypervisor, cluster.hvparams[instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs, False)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" % pnode)
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" % node,
                     prereq=True, ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" % node,
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" % node,
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node)
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, instance.primary_node, "ballooning memory for instance %s" %
            instance.name, delta, instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[instance.hypervisor])

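    # Callbacks for _ApplyContainerMods below; at this point we only validate
    # parameters and reserve/release IPs, the actual NIC objects are built
    # later by _CreateNewNic/_ApplyNicMods (still operating on copies).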
    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)

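    # Unless explicitly disabled, verify the modified instance against the
    # node group's instance policy twice: once with the proposed maximum
    # memory and once with the minimum, so both ends of the range must fit.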
    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    assert instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     instance.name, pnode, [snode],
                                     disk_info, None, None, 0, feedback_fn,
                                     self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
    info = GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode, instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

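    # If DRBD device creation fails half-way, undo the rename above so the
    # instance is left on its original plain LVs instead of in a mixed state.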
    feedback_fn("Initializing DRBD devices...")
3023
    # all child devices are in place, we can now create the DRBD devices
3024
    try:
3025
      for disk in anno_disks:
3026
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3027
          f_create = node == pnode
3028
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3029
                               excl_stor)
3030
    except errors.GenericError, e:
3031
      feedback_fn("Initializing of DRBD devices failed;"
3032
                  " renaming back original volumes...")
3033
      for disk in new_disks:
3034
        self.cfg.SetDiskID(disk, pnode)
3035
      rename_back_list = [(n.children[0], o.logical_id)
3036
                          for (n, o) in zip(new_disks, instance.disks)]
3037
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3038
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3039
      raise
3040

    
3041
    # at this point, the instance has been modified
3042
    instance.disk_template = constants.DT_DRBD8
3043
    instance.disks = new_disks
3044
    self.cfg.Update(instance, feedback_fn)
3045

    
3046
    # Release node locks while waiting for sync
3047
    ReleaseLocks(self, locking.LEVEL_NODE)
3048

    
3049
    # disks are created, waiting for sync
3050
    disk_abort = not WaitForSync(self, instance,
3051
                                 oneshot=not self.op.wait_for_sync)
3052
    if disk_abort:
3053
      raise errors.OpExecError("There are some degraded disks for"
3054
                               " this instance, please cleanup manually")
3055

    
3056
    # Node resource locks will be released by caller
3057

    
3058
  def _ConvertDrbdToPlain(self, feedback_fn):
3059
    """Converts an instance from drbd to plain.
3060

3061
    """
3062
    instance = self.instance
3063

    
3064
    assert len(instance.secondary_nodes) == 1
3065
    assert instance.disk_template == constants.DT_DRBD8
3066

    
3067
    pnode = instance.primary_node
3068
    snode = instance.secondary_nodes[0]
3069
    feedback_fn("Converting template to plain")
3070

    
3071
    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3072
    new_disks = [d.children[0] for d in instance.disks]
3073

    
3074
    # copy over size, mode and name
3075
    for parent, child in zip(old_disks, new_disks):
3076
      child.size = parent.size
3077
      child.mode = parent.mode
3078
      child.name = parent.name
3079

    
3080
    # this is a DRBD disk, return its port to the pool
3081
    # NOTE: this must be done right before the call to cfg.Update!
3082
    for disk in old_disks:
3083
      tcp_port = disk.logical_id[2]
3084
      self.cfg.AddTcpUdpPort(tcp_port)
3085

    
3086
    # update instance structure
3087
    instance.disks = new_disks
3088
    instance.disk_template = constants.DT_PLAIN
3089
    _UpdateIvNames(0, instance.disks)
3090
    self.cfg.Update(instance, feedback_fn)
3091

    
3092
    # Release locks in case removing disks takes a while
3093
    ReleaseLocks(self, locking.LEVEL_NODE)
3094

    
3095
    feedback_fn("Removing volumes on the secondary node...")
3096
    for disk in old_disks:
3097
      self.cfg.SetDiskID(disk, snode)
3098
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3099
      if msg:
3100
        self.LogWarning("Could not remove block device %s on node %s,"
3101
                        " continuing anyway: %s", disk.iv_name, snode, msg)
3102

    
3103
    feedback_fn("Removing unneeded volumes on the primary node...")
3104
    for idx, disk in enumerate(old_disks):
3105
      meta = disk.children[1]
3106
      self.cfg.SetDiskID(meta, pnode)
3107
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3108
      if msg:
3109
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
3110
                        " continuing anyway: %s", idx, pnode, msg)
3111

    
3112
  def _CreateNewDisk(self, idx, params, _):
3113
    """Creates a new disk.
3114

3115
    """
3116
    instance = self.instance
3117

    
3118
    # add a new disk
3119
    if instance.disk_template in constants.DTS_FILEBASED:
3120
      (file_driver, file_path) = instance.disks[0].logical_id
3121
      file_path = os.path.dirname(file_path)
3122
    else:
3123
      file_driver = file_path = None
3124

    
3125
    disk = \
3126
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
3127
                           instance.primary_node, instance.secondary_nodes,
3128
                           [params], file_path, file_driver, idx,
3129
                           self.Log, self.diskparams)[0]
3130

    
3131
    new_disks = CreateDisks(self, instance, disks=[disk])
3132

    
3133
    if self.cluster.prealloc_wipe_disks:
3134
      # Wipe new disk
3135
      WipeOrCleanupDisks(self, instance,
3136
                         disks=[(idx, disk, 0)],
3137
                         cleanup=new_disks)
3138

    
3139
    return (disk, [
3140
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3141
      ])
3142

    
3143
  @staticmethod
3144
  def _ModifyDisk(idx, disk, params, _):
3145
    """Modifies a disk.
3146

3147
    """
3148
    changes = []
3149
    mode = params.get(constants.IDISK_MODE, None)
3150
    if mode:
3151
      disk.mode = mode
3152
      changes.append(("disk.mode/%d" % idx, disk.mode))
3153

    
3154
    name = params.get(constants.IDISK_NAME, None)
3155
    disk.name = name
3156
    changes.append(("disk.name/%d" % idx, disk.name))
3157

    
3158
    return changes
3159

    
3160
  def _RemoveDisk(self, idx, root, _):
3161
    """Removes a disk.
3162

3163
    """
3164
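    # Remove the disk's block devices from every node that hosts part of it
    # (ComputeNodeTree walks the whole disk tree, e.g. both DRBD peers);
    # failures only produce warnings so removal of the remaining devices
    # continues.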
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
      self.cfg.SetDiskID(disk, node)
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx, node, msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

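    # "result" collects (name, new value) pairs describing every applied
    # change; it is returned to the caller for display, with entries such as
    # ("disk/0", "add:size=...,mode=...") or ("be/<param>", <value>).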
    result = []
    instance = self.instance

    # New primary node
    if self.op.pnode:
      instance.primary_node = self.op.pnode

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
                                                     instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(instance.all_nodes)
        if self.op.remote_node:
          check_nodes.add(self.op.remote_node)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

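  # Only the plain <-> drbd8 conversions are implemented; _PreCheckDiskTemplate
  # rejects any other (old, new) template pair before Exec() gets this far.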
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_name
                        for group in lock_groups
                        for node_name in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

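    # Ask the configured instance allocator for a relocation plan towards the
    # target groups; the plan comes back as a list of jobs which are handed
    # to the job queue via ResultWithJobs rather than executed inline.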
    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)