#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
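
# Example shape only (an assumption for illustration, not taken from this
# file): a callback conforming to the type above could report its changes as
#   [("mode", constants.NIC_MODE_BRIDGED), ("link", "br0")]
# i.e. a list of (description, new value) pairs.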


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
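
# Illustration (hedged, not part of the original module): with name "inst1",
# a resolver answer of "inst1.example.com" passes the prefix check above,
# while an answer such as "other.example.com" raises OpPrereqError because
# the resolved name no longer matches the requested one.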


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)
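
# Usage sketch (mirrors _RunAllocator further below; the variable names here
# are illustrative only):
#   req = _CreateInstanceAllocRequest(op, disks, nics, be_full, node_whitelist)
#   ial = iallocator.IAllocator(cfg, rpc_runner, req)
#   ial.Run(op.iallocator)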


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
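
# Example of the effect (assumed values, for illustration only): with
# op.beparams == {constants.BE_VCPUS: constants.VALUE_AUTO}, the "auto" value
# is replaced by the cluster default, the dict is upgraded and type-checked,
# and the result of SimpleFillBE() contains every backend parameter.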


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up NICs

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
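
# Illustrative input/output (assumed values, not part of the original file):
# an opcode NIC entry such as {"ip": "pool", "network": "net1"} yields an
# objects.NIC using the cluster default mode, ip left as "pool" (resolved
# later from the network's address pool in CheckPrereq) and the UUID of
# "net1" as its network.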


def _CheckForConflictingIp(lu, ip, node):
  """In case of conflicting IP address raise error.

  @param lu: the logical unit on behalf of which we're checking
  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if the instance specs meet the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
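
# Illustrative call (assumed values): passing an instance_spec such as
#   {constants.ISPEC_MEM_SIZE: 512, constants.ISPEC_CPU_COUNT: 1,
#    constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [1024],
#    constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}
# returns the (possibly empty) list of policy violations computed by
# ComputeIPolicySpecViolation.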


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
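
# Illustration (assumption: OS variants are requested as "<os>+<variant>",
# which is not spelled out in this file): "debian+default" is accepted only
# if "default" is in os_obj.supported_variants, while a bare "debian" is only
# accepted when the OS declares no variants at all.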


class LUInstanceCreate(LogicalUnit):
318
  """Create an instance.
319

320
  """
321
  HPATH = "instance-add"
322
  HTYPE = constants.HTYPE_INSTANCE
323
  REQ_BGL = False
324

    
325
  def CheckArguments(self):
326
    """Check arguments.
327

328
    """
329
    # do not require name_check to ease forward/backward compatibility
330
    # for tools
331
    if self.op.no_install and self.op.start:
332
      self.LogInfo("No-installation mode selected, disabling startup")
333
      self.op.start = False
334
    # validate/normalize the instance name
335
    self.op.instance_name = \
336
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
337

    
338
    if self.op.ip_check and not self.op.name_check:
339
      # TODO: make the ip check more flexible and not depend on the name check
340
      raise errors.OpPrereqError("Cannot do IP address check without a name"
341
                                 " check", errors.ECODE_INVAL)
342

    
343
    # check nics' parameter names
344
    for nic in self.op.nics:
345
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
346
    # check that NIC's parameters names are unique and valid
347
    utils.ValidateDeviceNames("NIC", self.op.nics)
348

    
349
    # check that disk's names are unique and valid
350
    utils.ValidateDeviceNames("disk", self.op.disks)
351

    
352
    cluster = self.cfg.GetClusterInfo()
353
    if not self.op.disk_template in cluster.enabled_disk_templates:
354
      raise errors.OpPrereqError("Cannot create an instance with disk template"
355
                                 " '%s', because it is not enabled in the"
356
                                 " cluster. Enabled disk templates are: %s." %
357
                                 (self.op.disk_template,
358
                                  ",".join(cluster.enabled_disk_templates)))
359

    
360
    # check disks. parameter names and consistent adopt/no-adopt strategy
361
    has_adopt = has_no_adopt = False
362
    for disk in self.op.disks:
363
      if self.op.disk_template != constants.DT_EXT:
364
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
365
      if constants.IDISK_ADOPT in disk:
366
        has_adopt = True
367
      else:
368
        has_no_adopt = True
369
    if has_adopt and has_no_adopt:
370
      raise errors.OpPrereqError("Either all disks are adopted or none is",
371
                                 errors.ECODE_INVAL)
372
    if has_adopt:
373
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
374
        raise errors.OpPrereqError("Disk adoption is not supported for the"
375
                                   " '%s' disk template" %
376
                                   self.op.disk_template,
377
                                   errors.ECODE_INVAL)
378
      if self.op.iallocator is not None:
379
        raise errors.OpPrereqError("Disk adoption not allowed with an"
380
                                   " iallocator script", errors.ECODE_INVAL)
381
      if self.op.mode == constants.INSTANCE_IMPORT:
382
        raise errors.OpPrereqError("Disk adoption not allowed for"
383
                                   " instance import", errors.ECODE_INVAL)
384
    else:
385
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
386
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
387
                                   " but no 'adopt' parameter given" %
388
                                   self.op.disk_template,
389
                                   errors.ECODE_INVAL)
390

    
391
    self.adopt_disks = has_adopt
392

    
393
    # instance name verification
394
    if self.op.name_check:
395
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
396
      self.op.instance_name = self.hostname1.name
397
      # used in CheckPrereq for ip ping check
398
      self.check_ip = self.hostname1.ip
399
    else:
400
      self.check_ip = None
401

    
402
    # file storage checks
403
    if (self.op.file_driver and
404
        not self.op.file_driver in constants.FILE_DRIVER):
405
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
406
                                 self.op.file_driver, errors.ECODE_INVAL)
407

    
408
    if self.op.disk_template == constants.DT_FILE:
409
      opcodes.RequireFileStorage()
410
    elif self.op.disk_template == constants.DT_SHARED_FILE:
411
      opcodes.RequireSharedFileStorage()
412

    
413
    ### Node/iallocator related checks
414
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
415

    
416
    if self.op.pnode is not None:
417
      if self.op.disk_template in constants.DTS_INT_MIRROR:
418
        if self.op.snode is None:
419
          raise errors.OpPrereqError("The networked disk templates need"
420
                                     " a mirror node", errors.ECODE_INVAL)
421
      elif self.op.snode:
422
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
423
                        " template")
424
        self.op.snode = None
425

    
426
    _CheckOpportunisticLocking(self.op)
427

    
428
    self._cds = GetClusterDomainSecret()
429

    
430
    if self.op.mode == constants.INSTANCE_IMPORT:
431
      # On import force_variant must be True, because if we forced it at
432
      # initial install, our only chance when importing it back is that it
433
      # works again!
434
      self.op.force_variant = True
435

    
436
      if self.op.no_install:
437
        self.LogInfo("No-installation mode has no effect during import")
438

    
439
    elif self.op.mode == constants.INSTANCE_CREATE:
440
      if self.op.os_type is None:
441
        raise errors.OpPrereqError("No guest OS specified",
442
                                   errors.ECODE_INVAL)
443
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
444
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
445
                                   " installation" % self.op.os_type,
446
                                   errors.ECODE_STATE)
447
      if self.op.disk_template is None:
448
        raise errors.OpPrereqError("No disk template specified",
449
                                   errors.ECODE_INVAL)
450

    
451
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
452
      # Check handshake to ensure both clusters have the same domain secret
453
      src_handshake = self.op.source_handshake
454
      if not src_handshake:
455
        raise errors.OpPrereqError("Missing source handshake",
456
                                   errors.ECODE_INVAL)
457

    
458
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
459
                                                           src_handshake)
460
      if errmsg:
461
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
462
                                   errors.ECODE_INVAL)
463

    
464
      # Load and check source CA
465
      self.source_x509_ca_pem = self.op.source_x509_ca
466
      if not self.source_x509_ca_pem:
467
        raise errors.OpPrereqError("Missing source X509 CA",
468
                                   errors.ECODE_INVAL)
469

    
470
      try:
471
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
472
                                                    self._cds)
473
      except OpenSSL.crypto.Error, err:
474
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
475
                                   (err, ), errors.ECODE_INVAL)
476

    
477
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
478
      if errcode is not None:
479
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
480
                                   errors.ECODE_INVAL)
481

    
482
      self.source_x509_ca = cert
483

    
484
      src_instance_name = self.op.source_instance_name
485
      if not src_instance_name:
486
        raise errors.OpPrereqError("Missing source instance name",
487
                                   errors.ECODE_INVAL)
488

    
489
      self.source_instance_name = \
490
        netutils.GetHostname(name=src_instance_name).name
491

    
492
    else:
493
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
494
                                 self.op.mode, errors.ECODE_INVAL)
495

    
496
  def ExpandNames(self):
497
    """ExpandNames for CreateInstance.
498

499
    Figure out the right locks for instance creation.
500

501
    """
502
    self.needed_locks = {}
503

    
504
    instance_name = self.op.instance_name
505
    # this is just a preventive check, but someone might still add this
506
    # instance in the meantime, and creation will fail at lock-add time
507
    if instance_name in self.cfg.GetInstanceList():
508
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
509
                                 instance_name, errors.ECODE_EXISTS)
510

    
511
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
512

    
513
    if self.op.iallocator:
514
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
515
      # specifying a group on instance creation and then selecting nodes from
516
      # that group
517
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
518
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
519

    
520
      if self.op.opportunistic_locking:
521
        self.opportunistic_locks[locking.LEVEL_NODE] = True
522
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
523
    else:
524
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
525
      nodelist = [self.op.pnode]
526
      if self.op.snode is not None:
527
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
528
        nodelist.append(self.op.snode)
529
      self.needed_locks[locking.LEVEL_NODE] = nodelist
530

    
531
    # in case of import lock the source node too
532
    if self.op.mode == constants.INSTANCE_IMPORT:
533
      src_node = self.op.src_node
534
      src_path = self.op.src_path
535

    
536
      if src_path is None:
537
        self.op.src_path = src_path = self.op.instance_name
538

    
539
      if src_node is None:
540
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
541
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
542
        self.op.src_node = None
543
        if os.path.isabs(src_path):
544
          raise errors.OpPrereqError("Importing an instance from a path"
545
                                     " requires a source node option",
546
                                     errors.ECODE_INVAL)
547
      else:
548
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
549
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
550
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
551
        if not os.path.isabs(src_path):
552
          self.op.src_path = src_path = \
553
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
554

    
555
    self.needed_locks[locking.LEVEL_NODE_RES] = \
556
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
557

    
558
  def _RunAllocator(self):
559
    """Run the allocator based on input opcode.
560

561
    """
562
    if self.op.opportunistic_locking:
563
      # Only consider nodes for which a lock is held
564
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
565
    else:
566
      node_whitelist = None
567

    
568
    #TODO Export network to iallocator so that it chooses a pnode
569
    #     in a nodegroup that has the desired network connected to
570
    req = _CreateInstanceAllocRequest(self.op, self.disks,
571
                                      self.nics, self.be_full,
572
                                      node_whitelist)
573
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
574

    
575
    ial.Run(self.op.iallocator)
576

    
577
    if not ial.success:
578
      # When opportunistic locks are used only a temporary failure is generated
579
      if self.op.opportunistic_locking:
580
        ecode = errors.ECODE_TEMP_NORES
581
      else:
582
        ecode = errors.ECODE_NORES
583

    
584
      raise errors.OpPrereqError("Can't compute nodes using"
585
                                 " iallocator '%s': %s" %
586
                                 (self.op.iallocator, ial.info),
587
                                 ecode)
588

    
589
    self.op.pnode = ial.result[0]
590
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
591
                 self.op.instance_name, self.op.iallocator,
592
                 utils.CommaJoin(ial.result))
593

    
594
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
595

    
596
    if req.RequiredNodes() == 2:
597
      self.op.snode = ial.result[1]
598

    
599
  def BuildHooksEnv(self):
600
    """Build hooks env.
601

602
    This runs on master, primary and secondary nodes of the instance.
603

604
    """
605
    env = {
606
      "ADD_MODE": self.op.mode,
607
      }
608
    if self.op.mode == constants.INSTANCE_IMPORT:
609
      env["SRC_NODE"] = self.op.src_node
610
      env["SRC_PATH"] = self.op.src_path
611
      env["SRC_IMAGES"] = self.src_images
612

    
613
    env.update(BuildInstanceHookEnv(
614
      name=self.op.instance_name,
615
      primary_node=self.op.pnode,
616
      secondary_nodes=self.secondaries,
617
      status=self.op.start,
618
      os_type=self.op.os_type,
619
      minmem=self.be_full[constants.BE_MINMEM],
620
      maxmem=self.be_full[constants.BE_MAXMEM],
621
      vcpus=self.be_full[constants.BE_VCPUS],
622
      nics=NICListToTuple(self, self.nics),
623
      disk_template=self.op.disk_template,
624
      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
625
              d[constants.IDISK_MODE]) for d in self.disks],
626
      bep=self.be_full,
627
      hvp=self.hv_full,
628
      hypervisor_name=self.op.hypervisor,
629
      tags=self.op.tags,
630
      ))
631

    
632
    return env
633

    
634
  def BuildHooksNodes(self):
635
    """Build hooks nodes.
636

637
    """
638
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
639
    return nl, nl
640

    
641
  def _ReadExportInfo(self):
642
    """Reads the export information from disk.
643

644
    It will override the opcode source node and path with the actual
645
    information, if these two were not specified before.
646

647
    @return: the export information
648

649
    """
650
    assert self.op.mode == constants.INSTANCE_IMPORT
651

    
652
    src_node = self.op.src_node
653
    src_path = self.op.src_path
654

    
655
    if src_node is None:
656
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
657
      exp_list = self.rpc.call_export_list(locked_nodes)
658
      found = False
659
      for node in exp_list:
660
        if exp_list[node].fail_msg:
661
          continue
662
        if src_path in exp_list[node].payload:
663
          found = True
664
          self.op.src_node = src_node = node
665
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
666
                                                       src_path)
667
          break
668
      if not found:
669
        raise errors.OpPrereqError("No export found for relative path %s" %
670
                                   src_path, errors.ECODE_INVAL)
671

    
672
    CheckNodeOnline(self, src_node)
673
    result = self.rpc.call_export_info(src_node, src_path)
674
    result.Raise("No export or invalid export found in dir %s" % src_path)
675

    
676
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
677
    if not export_info.has_section(constants.INISECT_EXP):
678
      raise errors.ProgrammerError("Corrupted export config",
679
                                   errors.ECODE_ENVIRON)
680

    
681
    ei_version = export_info.get(constants.INISECT_EXP, "version")
682
    if (int(ei_version) != constants.EXPORT_VERSION):
683
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
684
                                 (ei_version, constants.EXPORT_VERSION),
685
                                 errors.ECODE_ENVIRON)
686
    return export_info
687

    
688
  def _ReadExportParams(self, einfo):
689
    """Use export parameters as defaults.
690

691
    In case the opcode doesn't specify (as in override) some instance
692
    parameters, then try to use them from the export information, if
693
    that declares them.
694

695
    """
696
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
697

    
698
    if self.op.disk_template is None:
699
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
700
        self.op.disk_template = einfo.get(constants.INISECT_INS,
701
                                          "disk_template")
702
        if self.op.disk_template not in constants.DISK_TEMPLATES:
703
          raise errors.OpPrereqError("Disk template specified in configuration"
704
                                     " file is not one of the allowed values:"
705
                                     " %s" %
706
                                     " ".join(constants.DISK_TEMPLATES),
707
                                     errors.ECODE_INVAL)
708
      else:
709
        raise errors.OpPrereqError("No disk template specified and the export"
710
                                   " is missing the disk_template information",
711
                                   errors.ECODE_INVAL)
712

    
713
    if not self.op.disks:
714
      disks = []
715
      # TODO: import the disk iv_name too
716
      for idx in range(constants.MAX_DISKS):
717
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
718
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
719
          disks.append({constants.IDISK_SIZE: disk_sz})
720
      self.op.disks = disks
721
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
722
        raise errors.OpPrereqError("No disk info specified and the export"
723
                                   " is missing the disk information",
724
                                   errors.ECODE_INVAL)
725

    
726
    if not self.op.nics:
727
      nics = []
728
      for idx in range(constants.MAX_NICS):
729
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
730
          ndict = {}
731
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
732
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
733
            ndict[name] = v
734
          nics.append(ndict)
735
        else:
736
          break
737
      self.op.nics = nics
738

    
739
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
740
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
741

    
742
    if (self.op.hypervisor is None and
743
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
744
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
745

    
746
    if einfo.has_section(constants.INISECT_HYP):
747
      # use the export parameters but do not override the ones
748
      # specified by the user
749
      for name, value in einfo.items(constants.INISECT_HYP):
750
        if name not in self.op.hvparams:
751
          self.op.hvparams[name] = value
752

    
753
    if einfo.has_section(constants.INISECT_BEP):
754
      # use the parameters, without overriding
755
      for name, value in einfo.items(constants.INISECT_BEP):
756
        if name not in self.op.beparams:
757
          self.op.beparams[name] = value
758
        # Compatibility for the old "memory" be param
759
        if name == constants.BE_MEMORY:
760
          if constants.BE_MAXMEM not in self.op.beparams:
761
            self.op.beparams[constants.BE_MAXMEM] = value
762
          if constants.BE_MINMEM not in self.op.beparams:
763
            self.op.beparams[constants.BE_MINMEM] = value
764
    else:
765
      # try to read the parameters old style, from the main section
766
      for name in constants.BES_PARAMETERS:
767
        if (name not in self.op.beparams and
768
            einfo.has_option(constants.INISECT_INS, name)):
769
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
770

    
771
    if einfo.has_section(constants.INISECT_OSP):
772
      # use the parameters, without overriding
773
      for name, value in einfo.items(constants.INISECT_OSP):
774
        if name not in self.op.osparams:
775
          self.op.osparams[name] = value
776

    
777
  def _RevertToDefaults(self, cluster):
778
    """Revert the instance parameters to the default values.
779

780
    """
781
    # hvparams
782
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
783
    for name in self.op.hvparams.keys():
784
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
785
        del self.op.hvparams[name]
786
    # beparams
787
    be_defs = cluster.SimpleFillBE({})
788
    for name in self.op.beparams.keys():
789
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
790
        del self.op.beparams[name]
791
    # nic params
792
    nic_defs = cluster.SimpleFillNIC({})
793
    for nic in self.op.nics:
794
      for name in constants.NICS_PARAMETERS:
795
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
796
          del nic[name]
797
    # osparams
798
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
799
    for name in self.op.osparams.keys():
800
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
801
        del self.op.osparams[name]
802

    
803
  def _CalculateFileStorageDir(self):
804
    """Calculate final instance file storage dir.
805

806
    """
807
    # file storage dir calculation/check
808
    self.instance_file_storage_dir = None
809
    if self.op.disk_template in constants.DTS_FILEBASED:
810
      # build the full file storage dir path
811
      joinargs = []
812

    
813
      if self.op.disk_template == constants.DT_SHARED_FILE:
814
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
815
      else:
816
        get_fsd_fn = self.cfg.GetFileStorageDir
817

    
818
      cfg_storagedir = get_fsd_fn()
819
      if not cfg_storagedir:
820
        raise errors.OpPrereqError("Cluster file storage dir not defined",
821
                                   errors.ECODE_STATE)
822
      joinargs.append(cfg_storagedir)
823

    
824
      if self.op.file_storage_dir is not None:
825
        joinargs.append(self.op.file_storage_dir)
826

    
827
      joinargs.append(self.op.instance_name)
828

    
829
      # pylint: disable=W0142
830
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
831

    
832
  def CheckPrereq(self): # pylint: disable=R0914
833
    """Check prerequisites.
834

835
    """
836
    self._CalculateFileStorageDir()
837

    
838
    if self.op.mode == constants.INSTANCE_IMPORT:
839
      export_info = self._ReadExportInfo()
840
      self._ReadExportParams(export_info)
841
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
842
    else:
843
      self._old_instance_name = None
844

    
845
    if (not self.cfg.GetVGName() and
846
        self.op.disk_template not in constants.DTS_NOT_LVM):
847
      raise errors.OpPrereqError("Cluster does not support lvm-based"
848
                                 " instances", errors.ECODE_STATE)
849

    
850
    if (self.op.hypervisor is None or
851
        self.op.hypervisor == constants.VALUE_AUTO):
852
      self.op.hypervisor = self.cfg.GetHypervisorType()
853

    
854
    cluster = self.cfg.GetClusterInfo()
855
    enabled_hvs = cluster.enabled_hypervisors
856
    if self.op.hypervisor not in enabled_hvs:
857
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
858
                                 " cluster (%s)" %
859
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
860
                                 errors.ECODE_STATE)
861

    
862
    # Check tag validity
863
    for tag in self.op.tags:
864
      objects.TaggableObject.ValidateTag(tag)
865

    
866
    # check hypervisor parameter syntax (locally)
867
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
868
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
869
                                      self.op.hvparams)
870
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
871
    hv_type.CheckParameterSyntax(filled_hvp)
872
    self.hv_full = filled_hvp
873
    # check that we don't specify global parameters on an instance
874
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
875
                         "instance", "cluster")
876

    
877
    # fill and remember the beparams dict
878
    self.be_full = _ComputeFullBeParams(self.op, cluster)
879

    
880
    # build os parameters
881
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
882

    
883
    # now that hvp/bep are in final format, let's reset to defaults,
884
    # if told to do so
885
    if self.op.identify_defaults:
886
      self._RevertToDefaults(cluster)
887

    
888
    # NIC buildup
889
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
890
                             self.proc.GetECId())
891

    
892
    # disk checks/pre-build
893
    default_vg = self.cfg.GetVGName()
894
    self.disks = ComputeDisks(self.op, default_vg)
895

    
896
    if self.op.mode == constants.INSTANCE_IMPORT:
897
      disk_images = []
898
      for idx in range(len(self.disks)):
899
        option = "disk%d_dump" % idx
900
        if export_info.has_option(constants.INISECT_INS, option):
901
          # FIXME: are the old os-es, disk sizes, etc. useful?
902
          export_name = export_info.get(constants.INISECT_INS, option)
903
          image = utils.PathJoin(self.op.src_path, export_name)
904
          disk_images.append(image)
905
        else:
906
          disk_images.append(False)
907

    
908
      self.src_images = disk_images
909

    
910
      if self.op.instance_name == self._old_instance_name:
911
        for idx, nic in enumerate(self.nics):
912
          if nic.mac == constants.VALUE_AUTO:
913
            nic_mac_ini = "nic%d_mac" % idx
914
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
915

    
916
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
917

    
918
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
919
    if self.op.ip_check:
920
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
921
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
922
                                   (self.check_ip, self.op.instance_name),
923
                                   errors.ECODE_NOTUNIQUE)
924

    
925
    #### mac address generation
926
    # By generating here the mac address both the allocator and the hooks get
927
    # the real final mac address rather than the 'auto' or 'generate' value.
928
    # There is a race condition between the generation and the instance object
929
    # creation, which means that we know the mac is valid now, but we're not
930
    # sure it will be when we actually add the instance. If things go bad
931
    # adding the instance will abort because of a duplicate mac, and the
932
    # creation job will fail.
933
    for nic in self.nics:
934
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
935
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
936

    
937
    #### allocator run
938

    
939
    if self.op.iallocator is not None:
940
      self._RunAllocator()
941

    
942
    # Release all unneeded node locks
943
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
944
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
945
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
946
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
947

    
948
    assert (self.owned_locks(locking.LEVEL_NODE) ==
949
            self.owned_locks(locking.LEVEL_NODE_RES)), \
950
      "Node locks differ from node resource locks"
951

    
952
    #### node related checks
953

    
954
    # check primary node
955
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
956
    assert self.pnode is not None, \
957
      "Cannot retrieve locked node %s" % self.op.pnode
958
    if pnode.offline:
959
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
960
                                 pnode.name, errors.ECODE_STATE)
961
    if pnode.drained:
962
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
963
                                 pnode.name, errors.ECODE_STATE)
964
    if not pnode.vm_capable:
965
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
966
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
967

    
968
    self.secondaries = []
969

    
970
    # Fill in any IPs from IP pools. This must happen here, because we need to
971
    # know the nic's primary node, as specified by the iallocator
972
    for idx, nic in enumerate(self.nics):
973
      net_uuid = nic.network
974
      if net_uuid is not None:
975
        nobj = self.cfg.GetNetwork(net_uuid)
976
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
977
        if netparams is None:
978
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node %s's nodegroup" %
981
                                     (nobj.name, self.pnode.name),
982
                                     errors.ECODE_INVAL)
983
        self.LogInfo("NIC/%d inherits netparams %s" %
984
                     (idx, netparams.values()))
985
        nic.nicparams = dict(netparams)
986
        if nic.ip is not None:
987
          if nic.ip.lower() == constants.NIC_IP_POOL:
988
            try:
989
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
990
            except errors.ReservationError:
991
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
992
                                         " from the address pool" % idx,
993
                                         errors.ECODE_STATE)
994
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
995
          else:
996
            try:
997
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
998
            except errors.ReservationError:
999
              raise errors.OpPrereqError("IP address %s already in use"
1000
                                         " or does not belong to network %s" %
1001
                                         (nic.ip, nobj.name),
1002
                                         errors.ECODE_NOTUNIQUE)
1003

    
1004
      # net is None, ip None or given
1005
      elif self.op.conflicts_check:
1006
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1007

    
1008
    # mirror node verification
1009
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1010
      if self.op.snode == pnode.name:
1011
        raise errors.OpPrereqError("The secondary node cannot be the"
1012
                                   " primary node", errors.ECODE_INVAL)
1013
      CheckNodeOnline(self, self.op.snode)
1014
      CheckNodeNotDrained(self, self.op.snode)
1015
      CheckNodeVmCapable(self, self.op.snode)
1016
      self.secondaries.append(self.op.snode)
1017

    
1018
      snode = self.cfg.GetNodeInfo(self.op.snode)
1019
      if pnode.group != snode.group:
1020
        self.LogWarning("The primary and secondary nodes are in two"
1021
                        " different node groups; the disk parameters"
1022
                        " from the first disk's node group will be"
1023
                        " used")
1024

    
1025
    nodes = [pnode]
1026
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1027
      nodes.append(snode)
1028
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1029
    excl_stor = compat.any(map(has_es, nodes))
1030
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1031
      raise errors.OpPrereqError("Disk template %s not supported with"
1032
                                 " exclusive storage" % self.op.disk_template,
1033
                                 errors.ECODE_STATE)
1034
    for disk in self.disks:
1035
      CheckSpindlesExclusiveStorage(disk, excl_stor)
1036

    
1037
    nodenames = [pnode.name] + self.secondaries
1038

    
1039
    if not self.adopt_disks:
1040
      if self.op.disk_template == constants.DT_RBD:
1041
        # _CheckRADOSFreeSpace() is just a placeholder.
1042
        # Any function that checks prerequisites can be placed here.
1043
        # Check if there is enough space on the RADOS cluster.
1044
        CheckRADOSFreeSpace()
1045
      elif self.op.disk_template == constants.DT_EXT:
1046
        # FIXME: Function that checks prereqs if needed
1047
        pass
1048
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
1049
        # Check lv size requirements, if not adopting
1050
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1051
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1052
      else:
1053
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1054
        pass
1055

    
1056
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1057
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1058
                                disk[constants.IDISK_ADOPT])
1059
                     for disk in self.disks])
1060
      if len(all_lvs) != len(self.disks):
1061
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1062
                                   errors.ECODE_INVAL)
1063
      for lv_name in all_lvs:
1064
        try:
1065
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1066
          # to ReserveLV uses the same syntax
1067
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1068
        except errors.ReservationError:
1069
          raise errors.OpPrereqError("LV named %s used by another instance" %
1070
                                     lv_name, errors.ECODE_NOTUNIQUE)
1071

    
1072
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1073
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1074

    
1075
      node_lvs = self.rpc.call_lv_list([pnode.name],
1076
                                       vg_names.payload.keys())[pnode.name]
1077
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1078
      node_lvs = node_lvs.payload
1079

    
1080
      delta = all_lvs.difference(node_lvs.keys())
1081
      if delta:
1082
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1083
                                   utils.CommaJoin(delta),
1084
                                   errors.ECODE_INVAL)
1085
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1086
      if online_lvs:
1087
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1088
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1089
                                   errors.ECODE_STATE)
1090
      # update the size of disk based on what is found
1091
      for dsk in self.disks:
1092
        dsk[constants.IDISK_SIZE] = \
1093
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1094
                                        dsk[constants.IDISK_ADOPT])][0]))
1095

    
1096
    elif self.op.disk_template == constants.DT_BLOCK:
1097
      # Normalize and de-duplicate device paths
1098
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1099
                       for disk in self.disks])
1100
      if len(all_disks) != len(self.disks):
1101
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1102
                                   errors.ECODE_INVAL)
1103
      baddisks = [d for d in all_disks
1104
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1105
      if baddisks:
1106
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1107
                                   " cannot be adopted" %
1108
                                   (utils.CommaJoin(baddisks),
1109
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1110
                                   errors.ECODE_INVAL)
1111

    
1112
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
1113
                                            list(all_disks))[pnode.name]
1114
      node_disks.Raise("Cannot get block device information from node %s" %
1115
                       pnode.name)
1116
      node_disks = node_disks.payload
1117
      delta = all_disks.difference(node_disks.keys())
1118
      if delta:
1119
        raise errors.OpPrereqError("Missing block device(s): %s" %
1120
                                   utils.CommaJoin(delta),
1121
                                   errors.ECODE_INVAL)
1122
      for dsk in self.disks:
1123
        dsk[constants.IDISK_SIZE] = \
1124
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1125

    
1126
    # Verify instance specs
1127
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1128
    ispec = {
1129
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1130
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1131
      constants.ISPEC_DISK_COUNT: len(self.disks),
1132
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1133
                                  for disk in self.disks],
1134
      constants.ISPEC_NIC_COUNT: len(self.nics),
1135
      constants.ISPEC_SPINDLE_USE: spindle_use,
1136
      }
1137

    
1138
    group_info = self.cfg.GetNodeGroup(pnode.group)
1139
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1140
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1141
                                               self.op.disk_template)
1142
    if not self.op.ignore_ipolicy and res:
1143
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1144
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1145
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1146

    
1147
    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1148

    
1149
    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1150
    # check OS parameters (remotely)
1151
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1152

    
1153
    CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1154

    
1155
    #TODO: _CheckExtParams (remotely)
1156
    # Check parameters for extstorage
1157

    
1158
    # memory check on primary node
1159
    #TODO(dynmem): use MINMEM for checking
1160
    if self.op.start:
1161
      CheckNodeFreeMemory(self, self.pnode.name,
1162
                          "creating instance %s" % self.op.instance_name,
1163
                          self.be_full[constants.BE_MAXMEM],
1164
                          self.op.hypervisor)
1165

    
1166
    self.dry_run_result = list(nodenames)
1167

    
1168
  def Exec(self, feedback_fn):
1169
    """Create and add the instance to the cluster.
1170

1171
    """
1172
    instance = self.op.instance_name
1173
    pnode_name = self.pnode.name
1174

    
1175
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1176
                self.owned_locks(locking.LEVEL_NODE)), \
1177
      "Node locks differ from node resource locks"
1178
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1179

    
1180
    ht_kind = self.op.hypervisor
1181
    if ht_kind in constants.HTS_REQ_PORT:
1182
      network_port = self.cfg.AllocatePort()
1183
    else:
1184
      network_port = None
1185

    
1186
    # This is ugly but we got a chicken-egg problem here
1187
    # We can only take the group disk parameters, as the instance
1188
    # has no disks yet (we are generating them right here).
1189
    node = self.cfg.GetNodeInfo(pnode_name)
1190
    nodegroup = self.cfg.GetNodeGroup(node.group)
1191
    disks = GenerateDiskTemplate(self,
1192
                                 self.op.disk_template,
1193
                                 instance, pnode_name,
1194
                                 self.secondaries,
1195
                                 self.disks,
1196
                                 self.instance_file_storage_dir,
1197
                                 self.op.file_driver,
1198
                                 0,
1199
                                 feedback_fn,
1200
                                 self.cfg.GetGroupDiskParams(nodegroup))
1201

    
1202
    iobj = objects.Instance(name=instance, os=self.op.os_type,
1203
                            primary_node=pnode_name,
1204
                            nics=self.nics, disks=disks,
1205
                            disk_template=self.op.disk_template,
1206
                            admin_state=constants.ADMINST_DOWN,
1207
                            network_port=network_port,
1208
                            beparams=self.op.beparams,
1209
                            hvparams=self.op.hvparams,
1210
                            hypervisor=self.op.hypervisor,
1211
                            osparams=self.op.osparams,
1212
                            )
1213

    
1214
    if self.op.tags:
1215
      for tag in self.op.tags:
1216
        iobj.AddTag(tag)
1217

    
1218
    if self.adopt_disks:
1219
      if self.op.disk_template == constants.DT_PLAIN:
1220
        # rename LVs to the newly-generated names; we need to construct
1221
        # 'fake' LV disks with the old data, plus the new unique_id
1222
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1223
        rename_to = []
1224
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1225
          rename_to.append(t_dsk.logical_id)
1226
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1227
          self.cfg.SetDiskID(t_dsk, pnode_name)
1228
        result = self.rpc.call_blockdev_rename(pnode_name,
1229
                                               zip(tmp_disks, rename_to))
1230
        result.Raise("Failed to rename adopted LVs")
1231
    else:
1232
      feedback_fn("* creating instance disks...")
1233
      try:
1234
        CreateDisks(self, iobj)
1235
      except errors.OpExecError:
1236
        self.LogWarning("Device creation failed")
1237
        self.cfg.ReleaseDRBDMinors(instance)
1238
        raise
1239

    
1240
    feedback_fn("adding instance %s to cluster config" % instance)
1241

    
1242
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1243

    
1244
    # Declare that we don't want to remove the instance lock anymore, as we've
1245
    # added the instance to the config
1246
    del self.remove_locks[locking.LEVEL_INSTANCE]
1247

    
1248
    if self.op.mode == constants.INSTANCE_IMPORT:
1249
      # Release unused nodes
1250
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1251
    else:
1252
      # Release all nodes
1253
      ReleaseLocks(self, locking.LEVEL_NODE)
1254

    
1255
    disk_abort = False
1256
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1257
      feedback_fn("* wiping instance disks...")
1258
      try:
1259
        WipeDisks(self, iobj)
1260
      except errors.OpExecError, err:
1261
        logging.exception("Wiping disks failed")
1262
        self.LogWarning("Wiping instance disks failed (%s)", err)
1263
        disk_abort = True
1264

    
1265
    if disk_abort:
1266
      # Something is already wrong with the disks, don't do anything else
1267
      pass
1268
    elif self.op.wait_for_sync:
1269
      disk_abort = not WaitForSync(self, iobj)
1270
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1271
      # make sure the disks are not degraded (still sync-ing is ok)
1272
      feedback_fn("* checking mirrors status")
1273
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1274
    else:
1275
      disk_abort = False
1276

    
1277
    if disk_abort:
1278
      RemoveDisks(self, iobj)
1279
      self.cfg.RemoveInstance(iobj.name)
1280
      # Make sure the instance lock gets removed
1281
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1282
      raise errors.OpExecError("There are some degraded disks for"
1283
                               " this instance")
1284

    
1285
    # Release all node resource locks
1286
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1287

    
1288
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1289
      # we need to set the disks ID to the primary node, since the
1290
      # preceding code might or might have not done it, depending on
1291
      # disk template and other options
1292
      for disk in iobj.disks:
1293
        self.cfg.SetDiskID(disk, pnode_name)
1294
      if self.op.mode == constants.INSTANCE_CREATE:
1295
        if not self.op.no_install:
1296
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1297
                        not self.op.wait_for_sync)
1298
          if pause_sync:
1299
            feedback_fn("* pausing disk sync to install instance OS")
1300
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1301
                                                              (iobj.disks,
1302
                                                               iobj), True)
1303
            for idx, success in enumerate(result.payload):
1304
              if not success:
1305
                logging.warn("pause-sync of instance %s for disk %d failed",
1306
                             instance, idx)
1307

    
1308
          feedback_fn("* running the instance OS create scripts...")
1309
          # FIXME: pass debug option from opcode to backend
1310
          os_add_result = \
1311
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1312
                                          self.op.debug_level)
1313
          if pause_sync:
1314
            feedback_fn("* resuming disk sync")
1315
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1316
                                                              (iobj.disks,
1317
                                                               iobj), False)
1318
            for idx, success in enumerate(result.payload):
1319
              if not success:
1320
                logging.warn("resume-sync of instance %s for disk %d failed",
1321
                             instance, idx)
1322

    
1323
          os_add_result.Raise("Could not add os for instance %s"
1324
                              " on node %s" % (instance, pnode_name))
1325

    
1326
      else:
1327
        if self.op.mode == constants.INSTANCE_IMPORT:
1328
          feedback_fn("* running the instance OS import scripts...")
1329

    
1330
          transfers = []
1331

    
1332
          for idx, image in enumerate(self.src_images):
1333
            if not image:
1334
              continue
1335

    
1336
            # FIXME: pass debug option from opcode to backend
1337
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1338
                                               constants.IEIO_FILE, (image, ),
1339
                                               constants.IEIO_SCRIPT,
1340
                                               (iobj.disks[idx], idx),
1341
                                               None)
1342
            transfers.append(dt)
1343

    
1344
          import_result = \
1345
            masterd.instance.TransferInstanceData(self, feedback_fn,
1346
                                                  self.op.src_node, pnode_name,
1347
                                                  self.pnode.secondary_ip,
1348
                                                  iobj, transfers)
1349
          if not compat.all(import_result):
1350
            self.LogWarning("Some disks for instance %s on node %s were not"
1351
                            " imported successfully" % (instance, pnode_name))
1352

    
1353
          rename_from = self._old_instance_name
1354

    
1355
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1356
          feedback_fn("* preparing remote import...")
1357
          # The source cluster will stop the instance before attempting to make
1358
          # a connection. In some cases stopping an instance can take a long
1359
          # time, hence the shutdown timeout is added to the connection
1360
          # timeout.
1361
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1362
                             self.op.source_shutdown_timeout)
1363
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1364

    
1365
          assert iobj.primary_node == self.pnode.name
1366
          disk_results = \
1367
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1368
                                          self.source_x509_ca,
1369
                                          self._cds, timeouts)
1370
          if not compat.all(disk_results):
1371
            # TODO: Should the instance still be started, even if some disks
1372
            # failed to import (valid for local imports, too)?
1373
            self.LogWarning("Some disks for instance %s on node %s were not"
1374
                            " imported successfully" % (instance, pnode_name))
1375

    
1376
          rename_from = self.source_instance_name
1377

    
1378
        else:
1379
          # also checked in the prereq part
1380
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1381
                                       % self.op.mode)
1382

    
1383
        # Run rename script on newly imported instance
1384
        assert iobj.name == instance
1385
        feedback_fn("Running rename script for %s" % instance)
1386
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1387
                                                   rename_from,
1388
                                                   self.op.debug_level)
1389
        if result.fail_msg:
1390
          self.LogWarning("Failed to run rename script for %s on node"
1391
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
1392

    
1393
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1394

    
1395
    if self.op.start:
1396
      iobj.admin_state = constants.ADMINST_UP
1397
      self.cfg.Update(iobj, feedback_fn)
1398
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1399
      feedback_fn("* starting instance...")
1400
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1401
                                            False, self.op.reason)
1402
      result.Raise("Could not start instance")
1403

    
1404
    return list(iobj.all_nodes)
1405

    
1406

    
1407
class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)
    # update info on disks
    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 instance.disk_template, errors.ECODE_STATE)

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node)
    CheckNodeNotDrained(self, target_node)
    CheckNodeVmCapable(self, target_node)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(self, target_node,
                          "failing over instance %s" %
                          instance.name, bep[constants.BE_MAXMEM],
                          instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.LogWarning("Could not shutdown instance %s on node %s."
                        " Proceeding anyway. Please make sure node"
                        " %s is down. Error details: %s",
                        instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = AssembleInstanceDisks(self, instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node,
                                            (instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator and has_nodes:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
        nodeslist.append(inst.pnode)
        if inst.snode is not None:
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
          nodeslist.append(inst.snode)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    cluster = self.cfg.GetClusterInfo()
    default_vg = self.cfg.GetVGName()
    ec_id = self.proc.GetECId()

    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                         _ComputeNics(op, cluster, None,
                                                      self.cfg, ec_id),
                                         _ComputeFullBeParams(op, cluster),
                                         node_whitelist)
             for op in self.op.instances]

    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    (allocatable, failed) = self.ia_result
    return {
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
        map(compat.fst, allocatable),
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
    (allocatable, failed) = self.ia_result

    jobs = []
    for (name, nodes) in allocatable:
      op = op2inst.pop(name)

      if len(nodes) > 1:
        (op.pnode, op.snode) = nodes
      else:
        (op.pnode,) = nodes

      jobs.append([op])

    missing = set(op2inst.keys()) - set(failed)
    assert not missing, \
      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
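
# Illustrative example (hypothetical values): for NIC modifications such as
#   mods = [(constants.DDM_ADD, -1, {constants.INIC_MAC: constants.VALUE_AUTO})]
# _PrepareContainerMods(mods, _InstNicModPrivate) simply extends every
# (op, index, params) tuple with a fresh private object, yielding entries of
# the form (op, index, params, _InstNicModPrivate()); with private_fn=None the
# fourth element is None.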


def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node, num_cpus, requested),
                                 errors.ECODE_NORES)


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
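
# Illustrative example (hypothetical container): for an instance with three
# NICs, GetItemFromContainer("1", "nic", instance.nics) returns
# (1, instance.nics[1]), "-1" selects the last NIC, and a non-numeric
# identifier is matched against each item's name or UUID instead.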


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
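
# Illustrative example (hypothetical callbacks): _ApplyContainerMods("nic",
# instance.nics, chgdesc, mods, create_fn, modify_fn, remove_fn) walks the
# prepared modifications in order; a (DDM_ADD, -1, params, private) entry
# appends the item built by create_fn, while DDM_MODIFY and DDM_REMOVE first
# resolve the identifier through GetItemFromContainer and then invoke the
# matching callback, collecting the reported changes in chgdesc.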


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result
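
  # Illustrative example (hypothetical values): a legacy 2-tuple list such as
  #   [(constants.DDM_ADD, {constants.IDISK_SIZE: 1024})]
  # is upgraded by _UpgradeDiskNicMods to the 3-tuple form
  #   [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})],
  # while entries whose first element is an index become
  # (constants.DDM_MODIFY, <index>, params).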
2193

    
2194
  @staticmethod
2195
  def _CheckMods(kind, mods, key_types, item_fn):
2196
    """Ensures requested disk/NIC modifications are valid.
2197

2198
    """
2199
    for (op, _, params) in mods:
2200
      assert ht.TDict(params)
2201

    
2202
      # If 'key_types' is an empty dict, we assume we have an
2203
      # 'ext' template and thus do not ForceDictType
2204
      if key_types:
2205
        utils.ForceDictType(params, key_types)
2206

    
2207
      if op == constants.DDM_REMOVE:
2208
        if params:
2209
          raise errors.OpPrereqError("No settings should be passed when"
2210
                                     " removing a %s" % kind,
2211
                                     errors.ECODE_INVAL)
2212
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2213
        item_fn(op, params)
2214
      else:
2215
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2216

    
2217
  @staticmethod
2218
  def _VerifyDiskModification(op, params, excl_stor):
2219
    """Verifies a disk modification.
2220

2221
    """
2222
    if op == constants.DDM_ADD:
2223
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2224
      if mode not in constants.DISK_ACCESS_SET:
2225
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2226
                                   errors.ECODE_INVAL)
2227

    
2228
      size = params.get(constants.IDISK_SIZE, None)
2229
      if size is None:
2230
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2231
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2232

    
2233
      try:
2234
        size = int(size)
2235
      except (TypeError, ValueError), err:
2236
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2237
                                   errors.ECODE_INVAL)
2238

    
2239
      params[constants.IDISK_SIZE] = size
2240
      name = params.get(constants.IDISK_NAME, None)
2241
      if name is not None and name.lower() == constants.VALUE_NONE:
2242
        params[constants.IDISK_NAME] = None
2243

    
2244
      CheckSpindlesExclusiveStorage(params, excl_stor)
2245

    
2246
    elif op == constants.DDM_MODIFY:
2247
      if constants.IDISK_SIZE in params:
2248
        raise errors.OpPrereqError("Disk size change not possible, use"
2249
                                   " grow-disk", errors.ECODE_INVAL)
2250
      if len(params) > 2:
2251
        raise errors.OpPrereqError("Disk modification doesn't support"
2252
                                   " additional arbitrary parameters",
2253
                                   errors.ECODE_INVAL)
2254
      name = params.get(constants.IDISK_NAME, None)
2255
      if name is not None and name.lower() == constants.VALUE_NONE:
2256
        params[constants.IDISK_NAME] = None
2257

    
2258
  @staticmethod
2259
  def _VerifyNicModification(op, params):
2260
    """Verifies a network interface modification.
2261

2262
    """
2263
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2264
      ip = params.get(constants.INIC_IP, None)
2265
      name = params.get(constants.INIC_NAME, None)
2266
      req_net = params.get(constants.INIC_NETWORK, None)
2267
      link = params.get(constants.NIC_LINK, None)
2268
      mode = params.get(constants.NIC_MODE, None)
2269
      if name is not None and name.lower() == constants.VALUE_NONE:
2270
        params[constants.INIC_NAME] = None
2271
      if req_net is not None:
2272
        if req_net.lower() == constants.VALUE_NONE:
2273
          params[constants.INIC_NETWORK] = None
2274
          req_net = None
2275
        elif link is not None or mode is not None:
2276
          raise errors.OpPrereqError("If network is given"
2277
                                     " mode or link should not",
2278
                                     errors.ECODE_INVAL)
2279

    
2280
      if op == constants.DDM_ADD:
2281
        macaddr = params.get(constants.INIC_MAC, None)
2282
        if macaddr is None:
2283
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2284

    
2285
      if ip is not None:
2286
        if ip.lower() == constants.VALUE_NONE:
2287
          params[constants.INIC_IP] = None
2288
        else:
2289
          if ip.lower() == constants.NIC_IP_POOL:
2290
            if op == constants.DDM_ADD and req_net is None:
2291
              raise errors.OpPrereqError("If ip=pool, parameter network"
2292
                                         " cannot be none",
2293
                                         errors.ECODE_INVAL)
2294
          else:
2295
            if not netutils.IPAddress.IsValid(ip):
2296
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2297
                                         errors.ECODE_INVAL)
2298

    
2299
      if constants.INIC_MAC in params:
2300
        macaddr = params[constants.INIC_MAC]
2301
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2302
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2303

    
2304
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2305
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2306
                                     " modifying an existing NIC",
2307
                                     errors.ECODE_INVAL)
2308

    
2309
  def CheckArguments(self):
2310
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2311
            self.op.hvparams or self.op.beparams or self.op.os_name or
2312
            self.op.offline is not None or self.op.runtime_mem or
2313
            self.op.pnode):
2314
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2315

    
2316
    if self.op.hvparams:
2317
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2318
                           "hypervisor", "instance", "cluster")
2319

    
2320
    self.op.disks = self._UpgradeDiskNicMods(
2321
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2322
    self.op.nics = self._UpgradeDiskNicMods(
2323
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2324

    
2325
    if self.op.disks and self.op.disk_template is not None:
2326
      raise errors.OpPrereqError("Disk template conversion and other disk"
2327
                                 " changes not supported at the same time",
2328
                                 errors.ECODE_INVAL)
2329

    
2330
    if (self.op.disk_template and
2331
        self.op.disk_template in constants.DTS_INT_MIRROR and
2332
        self.op.remote_node is None):
2333
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2334
                                 " one requires specifying a secondary node",
2335
                                 errors.ECODE_INVAL)
2336

    
2337
    # Check NIC modifications
2338
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2339
                    self._VerifyNicModification)
2340

    
2341
    if self.op.pnode:
2342
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2343

    
2344
  def ExpandNames(self):
2345
    self._ExpandAndLockInstance()
2346
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2347
    # Can't even acquire node locks in shared mode as upcoming changes in
2348
    # Ganeti 2.6 will start to modify the node object on disk conversion
2349
    self.needed_locks[locking.LEVEL_NODE] = []
2350
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2351
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2352
    # Look node group to look up the ipolicy
2353
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2354

    
2355
  def DeclareLocks(self, level):
2356
    if level == locking.LEVEL_NODEGROUP:
2357
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2358
      # Acquire locks for the instance's nodegroups optimistically. Needs
2359
      # to be verified in CheckPrereq
2360
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2361
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2362
    elif level == locking.LEVEL_NODE:
2363
      self._LockInstancesNodes()
2364
      if self.op.disk_template and self.op.remote_node:
2365
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2366
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2367
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2368
      # Copy node locks
2369
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2370
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2371

    
2372
  def BuildHooksEnv(self):
2373
    """Build hooks env.
2374

2375
    This runs on the master, primary and secondaries.
2376

2377
    """
2378
    args = {}
2379
    if constants.BE_MINMEM in self.be_new:
2380
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2381
    if constants.BE_MAXMEM in self.be_new:
2382
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2383
    if constants.BE_VCPUS in self.be_new:
2384
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2385
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2386
    # information at all.
2387

    
2388
    if self._new_nics is not None:
2389
      nics = []
2390

    
2391
      for nic in self._new_nics:
2392
        n = copy.deepcopy(nic)
2393
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2394
        n.nicparams = nicparams
2395
        nics.append(NICToTuple(self, n))
2396

    
2397
      args["nics"] = nics
2398

    
2399
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2400
    if self.op.disk_template:
2401
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2402
    if self.op.runtime_mem:
2403
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2404

    
2405
    return env
2406

    
2407
  def BuildHooksNodes(self):
2408
    """Build hooks nodes.
2409

2410
    """
2411
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2412
    return (nl, nl)
2413

    
2414
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2415
                              old_params, cluster, pnode):
2416

    
2417
    update_params_dict = dict([(key, params[key])
2418
                               for key in constants.NICS_PARAMETERS
2419
                               if key in params])
2420

    
2421
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2422
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2423

    
2424
    new_net_uuid = None
2425
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2426
    if new_net_uuid_or_name:
2427
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2428
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2429

    
2430
    if old_net_uuid:
2431
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2432

    
2433
    if new_net_uuid:
2434
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2435
      if not netparams:
2436
        raise errors.OpPrereqError("No netparams found for the network"
2437
                                   " %s, probably not connected" %
2438
                                   new_net_obj.name, errors.ECODE_INVAL)
2439
      new_params = dict(netparams)
2440
    else:
2441
      new_params = GetUpdatedParams(old_params, update_params_dict)
2442

    
2443
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2444

    
2445
    new_filled_params = cluster.SimpleFillNIC(new_params)
2446
    objects.NIC.CheckParameterSyntax(new_filled_params)
2447

    
2448
    new_mode = new_filled_params[constants.NIC_MODE]
2449
    if new_mode == constants.NIC_MODE_BRIDGED:
2450
      bridge = new_filled_params[constants.NIC_LINK]
2451
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2452
      if msg:
2453
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2454
        if self.op.force:
2455
          self.warn.append(msg)
2456
        else:
2457
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2458

    
2459
    elif new_mode == constants.NIC_MODE_ROUTED:
2460
      ip = params.get(constants.INIC_IP, old_ip)
2461
      if ip is None:
2462
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2463
                                   " on a routed NIC", errors.ECODE_INVAL)
2464

    
2465
    elif new_mode == constants.NIC_MODE_OVS:
2466
      # TODO: check OVS link
2467
      self.LogInfo("OVS links are currently not checked for correctness")
2468

    
2469
    if constants.INIC_MAC in params:
2470
      mac = params[constants.INIC_MAC]
2471
      if mac is None:
2472
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2473
                                   errors.ECODE_INVAL)
2474
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2475
        # otherwise generate the MAC address
2476
        params[constants.INIC_MAC] = \
2477
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2478
      else:
2479
        # or validate/reserve the current one
2480
        try:
2481
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2482
        except errors.ReservationError:
2483
          raise errors.OpPrereqError("MAC address '%s' already in use"
2484
                                     " in cluster" % mac,
2485
                                     errors.ECODE_NOTUNIQUE)
2486
    elif new_net_uuid != old_net_uuid:
2487

    
2488
      def get_net_prefix(net_uuid):
2489
        mac_prefix = None
2490
        if net_uuid:
2491
          nobj = self.cfg.GetNetwork(net_uuid)
2492
          mac_prefix = nobj.mac_prefix
2493

    
2494
        return mac_prefix
2495

    
2496
      new_prefix = get_net_prefix(new_net_uuid)
2497
      old_prefix = get_net_prefix(old_net_uuid)
2498
      if old_prefix != new_prefix:
2499
        params[constants.INIC_MAC] = \
2500
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2501

    
2502
    # if there is a change in (ip, network) tuple
2503
    new_ip = params.get(constants.INIC_IP, old_ip)
2504
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2505
      if new_ip:
2506
        # if IP is pool then require a network and generate one IP
2507
        if new_ip.lower() == constants.NIC_IP_POOL:
2508
          if new_net_uuid:
2509
            try:
2510
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2511
            except errors.ReservationError:
2512
              raise errors.OpPrereqError("Unable to get a free IP"
2513
                                         " from the address pool",
2514
                                         errors.ECODE_STATE)
2515
            self.LogInfo("Chose IP %s from network %s",
2516
                         new_ip,
2517
                         new_net_obj.name)
2518
            params[constants.INIC_IP] = new_ip
2519
          else:
2520
            raise errors.OpPrereqError("ip=pool, but no network found",
2521
                                       errors.ECODE_INVAL)
2522
        # Reserve new IP if in the new network if any
2523
        elif new_net_uuid:
2524
          try:
2525
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2526
            self.LogInfo("Reserving IP %s in network %s",
2527
                         new_ip, new_net_obj.name)
2528
          except errors.ReservationError:
2529
            raise errors.OpPrereqError("IP %s not available in network %s" %
2530
                                       (new_ip, new_net_obj.name),
2531
                                       errors.ECODE_NOTUNIQUE)
2532
        # new network is None so check if new IP is a conflicting IP
2533
        elif self.op.conflicts_check:
2534
          _CheckForConflictingIp(self, new_ip, pnode)
2535

    
2536
      # release old IP if old network is not None
2537
      if old_ip and old_net_uuid:
2538
        try:
2539
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2540
        except errors.AddressPoolError:
2541
          logging.warning("Release IP %s not contained in network %s",
2542
                          old_ip, old_net_obj.name)
2543

    
2544
    # there are no changes in (ip, network) tuple and old network is not None
2545
    elif (old_net_uuid is not None and
2546
          (req_link is not None or req_mode is not None)):
2547
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2548
                                 " a NIC that is connected to a network",
2549
                                 errors.ECODE_INVAL)
2550

    
2551
    private.params = new_params
2552
    private.filled = new_filled_params
2553

    
2554
  def _PreCheckDiskTemplate(self, pnode_info):
2555
    """CheckPrereq checks related to a new disk template."""
2556
    # Arguments are passed to avoid configuration lookups
2557
    instance = self.instance
2558
    pnode = instance.primary_node
2559
    cluster = self.cluster
2560
    if instance.disk_template == self.op.disk_template:
2561
      raise errors.OpPrereqError("Instance already has disk template %s" %
2562
                                 instance.disk_template, errors.ECODE_INVAL)
2563

    
2564
    if (instance.disk_template,
2565
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2566
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2567
                                 " %s to %s" % (instance.disk_template,
2568
                                                self.op.disk_template),
2569
                                 errors.ECODE_INVAL)
2570
    CheckInstanceState(self, instance, INSTANCE_DOWN,
2571
                       msg="cannot change disk template")
2572
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2573
      if self.op.remote_node == pnode:
2574
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2575
                                   " as the primary node of the instance" %
2576
                                   self.op.remote_node, errors.ECODE_STATE)
2577
      CheckNodeOnline(self, self.op.remote_node)
2578
      CheckNodeNotDrained(self, self.op.remote_node)
2579
      # FIXME: here we assume that the old instance type is DT_PLAIN
2580
      assert instance.disk_template == constants.DT_PLAIN
2581
      disks = [{constants.IDISK_SIZE: d.size,
2582
                constants.IDISK_VG: d.logical_id[0]}
2583
               for d in instance.disks]
2584
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2585
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2586

    
2587
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2588
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2589
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2590
                                                              snode_group)
2591
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2592
                             ignore=self.op.ignore_ipolicy)
2593
      if pnode_info.group != snode_info.group:
2594
        self.LogWarning("The primary and secondary nodes are in two"
2595
                        " different node groups; the disk parameters"
2596
                        " from the first disk's node group will be"
2597
                        " used")
2598

    
2599
    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2600
      # Make sure none of the nodes require exclusive storage
2601
      nodes = [pnode_info]
2602
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2603
        assert snode_info
2604
        nodes.append(snode_info)
2605
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2606
      if compat.any(map(has_es, nodes)):
2607
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2608
                  " storage is enabled" % (instance.disk_template,
2609
                                           self.op.disk_template))
2610
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2611

    
2612
  def _PreCheckDisks(self, ispec):
2613
    """CheckPrereq checks related to disk changes.
2614

2615
    @type ispec: dict
2616
    @param ispec: instance specs to be updated with the new disks
2617

2618
    """
2619
    instance = self.instance
2620
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2621

    
2622
    excl_stor = compat.any(
2623
      rpc.GetExclusiveStorageForNodeNames(self.cfg, instance.all_nodes).values()
2624
      )
2625

    
2626
    # Check disk modifications. This is done here and not in CheckArguments
2627
    # (as with NICs), because we need to know the instance's disk template
2628
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2629
    if instance.disk_template == constants.DT_EXT:
2630
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
2631
    else:
2632
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2633
                      ver_fn)
2634

    
2635
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
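    # Each entry in self.diskmod is an (op, index, params, private) tuple,
    # for example (illustrative values):
    #   (constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}, None)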
2636

    
2637
    # Check the validity of the `provider' parameter
2638
    if instance.disk_template == constants.DT_EXT:
2639
      for mod in self.diskmod:
2640
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2641
        if mod[0] == constants.DDM_ADD:
2642
          if ext_provider is None:
2643
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
2644
                                       " '%s' missing, during disk add" %
2645
                                       (constants.DT_EXT,
2646
                                        constants.IDISK_PROVIDER),
2647
                                       errors.ECODE_NOENT)
2648
        elif mod[0] == constants.DDM_MODIFY:
2649
          if ext_provider:
2650
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2651
                                       " modification" %
2652
                                       constants.IDISK_PROVIDER,
2653
                                       errors.ECODE_INVAL)
2654
    else:
2655
      for mod in self.diskmod:
2656
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2657
        if ext_provider is not None:
2658
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
2659
                                     " instances of type '%s'" %
2660
                                     (constants.IDISK_PROVIDER,
2661
                                      constants.DT_EXT),
2662
                                     errors.ECODE_INVAL)
2663

    
2664
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2665
      raise errors.OpPrereqError("Disk operations not supported for"
2666
                                 " diskless instances", errors.ECODE_INVAL)
2667

    
2668
    def _PrepareDiskMod(_, disk, params, __):
2669
      disk.name = params.get(constants.IDISK_NAME, None)
2670

    
2671
    # Verify disk changes (operating on a copy)
2672
    disks = copy.deepcopy(instance.disks)
2673
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2674
                        _PrepareDiskMod, None)
2675
    utils.ValidateDeviceNames("disk", disks)
2676
    if len(disks) > constants.MAX_DISKS:
2677
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2678
                                 " more" % constants.MAX_DISKS,
2679
                                 errors.ECODE_STATE)
2680
    disk_sizes = [disk.size for disk in instance.disks]
2681
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
2682
                      self.diskmod if op == constants.DDM_ADD)
2683
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2684
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2685

    
2686
    if self.op.offline is not None and self.op.offline:
2687
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2688
                         msg="can't change to offline")
2689

    
2690
  def CheckPrereq(self):
2691
    """Check prerequisites.
2692

2693
    This checks the requested modifications against the current instance state.
2694

2695
    """
2696
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2697
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2698

    
2699
    cluster = self.cluster = self.cfg.GetClusterInfo()
2700
    assert self.instance is not None, \
2701
      "Cannot retrieve locked instance %s" % self.op.instance_name
2702

    
2703
    pnode = instance.primary_node
2704

    
2705
    self.warn = []
2706

    
2707
    if (self.op.pnode is not None and self.op.pnode != pnode and
2708
        not self.op.force):
2709
      # verify that the instance is not up
2710
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
2711
                                                  instance.hypervisor)
2712
      if instance_info.fail_msg:
2713
        self.warn.append("Can't get instance runtime information: %s" %
2714
                         instance_info.fail_msg)
2715
      elif instance_info.payload:
2716
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2717
                                   errors.ECODE_STATE)
2718

    
2719
    assert pnode in self.owned_locks(locking.LEVEL_NODE)
2720
    nodelist = list(instance.all_nodes)
2721
    pnode_info = self.cfg.GetNodeInfo(pnode)
2722

    
2723
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2724
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2725
    group_info = self.cfg.GetNodeGroup(pnode_info.group)
2726

    
2727
    # dictionary with instance information after the modification
2728
    ispec = {}
2729

    
2730
    # Prepare NIC modifications
2731
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2732

    
2733
    # OS change
2734
    if self.op.os_name and not self.op.force:
2735
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2736
                     self.op.force_variant)
2737
      instance_os = self.op.os_name
2738
    else:
2739
      instance_os = instance.os
2740

    
2741
    assert not (self.op.disk_template and self.op.disks), \
2742
      "Can't modify disk template and apply disk changes at the same time"
2743

    
2744
    if self.op.disk_template:
2745
      self._PreCheckDiskTemplate(pnode_info)
2746

    
2747
    self._PreCheckDisks(ispec)
2748

    
2749
    # hvparams processing
2750
    if self.op.hvparams:
2751
      hv_type = instance.hypervisor
2752
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2753
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2754
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2755

    
2756
      # local check
2757
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2758
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2759
      self.hv_proposed = self.hv_new = hv_new # the new actual values
2760
      self.hv_inst = i_hvdict # the new dict (without defaults)
2761
    else:
2762
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2763
                                              instance.hvparams)
2764
      self.hv_new = self.hv_inst = {}
2765

    
2766
    # beparams processing
2767
    if self.op.beparams:
2768
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2769
                                  use_none=True)
2770
      objects.UpgradeBeParams(i_bedict)
2771
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2772
      be_new = cluster.SimpleFillBE(i_bedict)
2773
      self.be_proposed = self.be_new = be_new # the new actual values
2774
      self.be_inst = i_bedict # the new dict (without defaults)
2775
    else:
2776
      self.be_new = self.be_inst = {}
2777
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2778
    be_old = cluster.FillBE(instance)
2779

    
2780
    # CPU param validation -- checking every time a parameter is
2781
    # changed to cover all cases where either CPU mask or vcpus have
2782
    # changed
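    # Example (illustrative): with an hv cpu_mask of "0-1:2:3" the parsed
    # cpu_list has three entries, so BE_VCPUS must be 3 for the check below
    # to pass.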
2783
    if (constants.BE_VCPUS in self.be_proposed and
2784
        constants.HV_CPU_MASK in self.hv_proposed):
2785
      cpu_list = \
2786
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2787
      # Verify mask is consistent with number of vCPUs. Can skip this
2788
      # test if only 1 entry in the CPU mask, which means same mask
2789
      # is applied to all vCPUs.
2790
      if (len(cpu_list) > 1 and
2791
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2792
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2793
                                   " CPU mask [%s]" %
2794
                                   (self.be_proposed[constants.BE_VCPUS],
2795
                                    self.hv_proposed[constants.HV_CPU_MASK]),
2796
                                   errors.ECODE_INVAL)
2797

    
2798
      # Only perform this test if a new CPU mask is given
2799
      if constants.HV_CPU_MASK in self.hv_new:
2800
        # Calculate the largest CPU number requested
2801
        max_requested_cpu = max(map(max, cpu_list))
2802
        # Check that all of the instance's nodes have enough physical CPUs to
2803
        # satisfy the requested CPU mask
2804
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2805
                                max_requested_cpu + 1, instance.hypervisor)
2806

    
2807
    # osparams processing
2808
    if self.op.osparams:
2809
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2810
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2811
      self.os_inst = i_osdict # the new dict (without defaults)
2812
    else:
2813
      self.os_inst = {}
2814

    
2815
    #TODO(dynmem): do the appropriate check involving MINMEM
2816
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2817
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2818
      mem_check_list = [pnode]
2819
      if be_new[constants.BE_AUTO_BALANCE]:
2820
        # either we changed auto_balance to yes or it was from before
2821
        mem_check_list.extend(instance.secondary_nodes)
2822
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
2823
                                                  instance.hypervisor)
2824
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2825
                                         [instance.hypervisor], False)
2826
      pninfo = nodeinfo[pnode]
2827
      msg = pninfo.fail_msg
2828
      if msg:
2829
        # Assume the primary node is unreachable and go ahead
2830
        self.warn.append("Can't get info from primary node %s: %s" %
2831
                         (pnode, msg))
2832
      else:
2833
        (_, _, (pnhvinfo, )) = pninfo.payload
2834
        if not isinstance(pnhvinfo.get("memory_free", None), int):
2835
          self.warn.append("Node data from primary node %s doesn't contain"
2836
                           " free memory information" % pnode)
2837
        elif instance_info.fail_msg:
2838
          self.warn.append("Can't get instance runtime information: %s" %
2839
                           instance_info.fail_msg)
2840
        else:
2841
          if instance_info.payload:
2842
            current_mem = int(instance_info.payload["memory"])
2843
          else:
2844
            # Assume instance not running
2845
            # (there is a slight race condition here, but it's not very
2846
            # probable, and we have no other way to check)
2847
            # TODO: Describe race condition
2848
            current_mem = 0
2849
          #TODO(dynmem): do the appropriate check involving MINMEM
2850
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2851
                      pnhvinfo["memory_free"])
2852
          if miss_mem > 0:
2853
            raise errors.OpPrereqError("This change will prevent the instance"
2854
                                       " from starting, due to %d MB of memory"
2855
                                       " missing on its primary node" %
2856
                                       miss_mem, errors.ECODE_NORES)
2857

    
2858
      if be_new[constants.BE_AUTO_BALANCE]:
2859
        for node, nres in nodeinfo.items():
2860
          if node not in instance.secondary_nodes:
2861
            continue
2862
          nres.Raise("Can't get info from secondary node %s" % node,
2863
                     prereq=True, ecode=errors.ECODE_STATE)
2864
          (_, _, (nhvinfo, )) = nres.payload
2865
          if not isinstance(nhvinfo.get("memory_free", None), int):
2866
            raise errors.OpPrereqError("Secondary node %s didn't return free"
2867
                                       " memory information" % node,
2868
                                       errors.ECODE_STATE)
2869
          #TODO(dynmem): do the appropriate check involving MINMEM
2870
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2871
            raise errors.OpPrereqError("This change will prevent the instance"
2872
                                       " from failover to its secondary node"
2873
                                       " %s, due to not enough memory" % node,
2874
                                       errors.ECODE_STATE)
2875

    
2876
    if self.op.runtime_mem:
2877
      remote_info = self.rpc.call_instance_info(instance.primary_node,
2878
                                                instance.name,
2879
                                                instance.hypervisor)
2880
      remote_info.Raise("Error checking node %s" % instance.primary_node)
2881
      if not remote_info.payload: # not running already
2882
        raise errors.OpPrereqError("Instance %s is not running" %
2883
                                   instance.name, errors.ECODE_STATE)
2884

    
2885
      current_memory = remote_info.payload["memory"]
2886
      if (not self.op.force and
2887
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2888
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2889
        raise errors.OpPrereqError("Instance %s must have memory between %d"
2890
                                   " and %d MB of memory unless --force is"
2891
                                   " given" %
2892
                                   (instance.name,
2893
                                    self.be_proposed[constants.BE_MINMEM],
2894
                                    self.be_proposed[constants.BE_MAXMEM]),
2895
                                   errors.ECODE_INVAL)
2896

    
2897
      delta = self.op.runtime_mem - current_memory
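      # Only growing the runtime memory can fail for lack of resources, so
      # the free-memory check below is needed for a positive delta only.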
2898
      if delta > 0:
2899
        CheckNodeFreeMemory(self, instance.primary_node,
2900
                            "ballooning memory for instance %s" %
2901
                            instance.name, delta, instance.hypervisor)
2902

    
2903
    def _PrepareNicCreate(_, params, private):
2904
      self._PrepareNicModification(params, private, None, None,
2905
                                   {}, cluster, pnode)
2906
      return (None, None)
2907

    
2908
    def _PrepareNicMod(_, nic, params, private):
2909
      self._PrepareNicModification(params, private, nic.ip, nic.network,
2910
                                   nic.nicparams, cluster, pnode)
2911
      return None
2912

    
2913
    def _PrepareNicRemove(_, params, __):
2914
      ip = params.ip
2915
      net = params.network
2916
      if net is not None and ip is not None:
2917
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2918

    
2919
    # Verify NIC changes (operating on copy)
2920
    nics = instance.nics[:]
2921
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
2922
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2923
    if len(nics) > constants.MAX_NICS:
2924
      raise errors.OpPrereqError("Instance has too many network interfaces"
2925
                                 " (%d), cannot add more" % constants.MAX_NICS,
2926
                                 errors.ECODE_STATE)
2927

    
2928
    # Pre-compute NIC changes (necessary to use result in hooks)
2929
    self._nic_chgdesc = []
2930
    if self.nicmod:
2931
      # Operate on copies as this is still in prereq
2932
      nics = [nic.Copy() for nic in instance.nics]
2933
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2934
                          self._CreateNewNic, self._ApplyNicMods, None)
2935
      # Verify that NIC names are unique and valid
2936
      utils.ValidateDeviceNames("NIC", nics)
2937
      self._new_nics = nics
2938
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2939
    else:
2940
      self._new_nics = None
2941
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
2942

    
2943
    if not self.op.ignore_ipolicy:
2944
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2945
                                                              group_info)
2946

    
2947
      # Fill ispec with backend parameters
2948
      ispec[constants.ISPEC_SPINDLE_USE] = \
2949
        self.be_new.get(constants.BE_SPINDLE_USE, None)
2950
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
2951
                                                         None)
2952

    
2953
      # Copy ispec to verify parameters with min/max values separately
2954
      if self.op.disk_template:
2955
        new_disk_template = self.op.disk_template
2956
      else:
2957
        new_disk_template = instance.disk_template
2958
      ispec_max = ispec.copy()
2959
      ispec_max[constants.ISPEC_MEM_SIZE] = \
2960
        self.be_new.get(constants.BE_MAXMEM, None)
2961
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
2962
                                                     new_disk_template)
2963
      ispec_min = ispec.copy()
2964
      ispec_min[constants.ISPEC_MEM_SIZE] = \
2965
        self.be_new.get(constants.BE_MINMEM, None)
2966
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
2967
                                                     new_disk_template)
2968

    
2969
      if (res_max or res_min):
2970
        # FIXME: Improve error message by including information about whether
2971
        # the upper or lower limit of the parameter fails the ipolicy.
2972
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
2973
               (group_info, group_info.name,
2974
                utils.CommaJoin(set(res_max + res_min))))
2975
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2976

    
2977
  def _ConvertPlainToDrbd(self, feedback_fn):
2978
    """Converts an instance from plain to drbd.
2979

2980
    """
2981
    feedback_fn("Converting template to drbd")
2982
    instance = self.instance
2983
    pnode = instance.primary_node
2984
    snode = self.op.remote_node
2985

    
2986
    assert instance.disk_template == constants.DT_PLAIN
2987

    
2988
    # create a fake disk info for _GenerateDiskTemplate
2989
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
2990
                  constants.IDISK_VG: d.logical_id[0],
2991
                  constants.IDISK_NAME: d.name}
2992
                 for d in instance.disks]
2993
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
2994
                                     instance.name, pnode, [snode],
2995
                                     disk_info, None, None, 0, feedback_fn,
2996
                                     self.diskparams)
2997
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
2998
                                        self.diskparams)
2999
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
3000
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
3001
    info = GetInstanceInfoText(instance)
3002
    feedback_fn("Creating additional volumes...")
3003
    # first, create the missing data and meta devices
3004
    for disk in anno_disks:
3005
      # unfortunately this is... not too nice
3006
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
3007
                           info, True, p_excl_stor)
3008
      for child in disk.children:
3009
        CreateSingleBlockDev(self, snode, instance, child, info, True,
3010
                             s_excl_stor)
3011
    # at this stage, all new LVs have been created, we can rename the
3012
    # old ones
3013
    feedback_fn("Renaming original volumes...")
3014
    rename_list = [(o, n.children[0].logical_id)
3015
                   for (o, n) in zip(instance.disks, new_disks)]
3016
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
3017
    result.Raise("Failed to rename original LVs")
3018

    
3019
    feedback_fn("Initializing DRBD devices...")
3020
    # all child devices are in place, we can now create the DRBD devices
3021
    try:
3022
      for disk in anno_disks:
3023
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3024
          f_create = node == pnode
3025
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3026
                               excl_stor)
3027
    except errors.GenericError, e:
3028
      feedback_fn("Initializing of DRBD devices failed;"
3029
                  " renaming back original volumes...")
3030
      for disk in new_disks:
3031
        self.cfg.SetDiskID(disk, pnode)
3032
      rename_back_list = [(n.children[0], o.logical_id)
3033
                          for (n, o) in zip(new_disks, instance.disks)]
3034
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3035
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3036
      raise
3037

    
3038
    # at this point, the instance has been modified
3039
    instance.disk_template = constants.DT_DRBD8
3040
    instance.disks = new_disks
3041
    self.cfg.Update(instance, feedback_fn)
3042

    
3043
    # Release node locks while waiting for sync
3044
    ReleaseLocks(self, locking.LEVEL_NODE)
3045

    
3046
    # disks are created, waiting for sync
3047
    disk_abort = not WaitForSync(self, instance,
3048
                                 oneshot=not self.op.wait_for_sync)
3049
    if disk_abort:
3050
      raise errors.OpExecError("There are some degraded disks for"
3051
                               " this instance, please cleanup manually")
3052

    
3053
    # Node resource locks will be released by caller
3054

    
3055
  def _ConvertDrbdToPlain(self, feedback_fn):
3056
    """Converts an instance from drbd to plain.
3057

3058
    """
3059
    instance = self.instance
3060

    
3061
    assert len(instance.secondary_nodes) == 1
3062
    assert instance.disk_template == constants.DT_DRBD8
3063

    
3064
    pnode = instance.primary_node
3065
    snode = instance.secondary_nodes[0]
3066
    feedback_fn("Converting template to plain")
3067

    
3068
    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3069
    new_disks = [d.children[0] for d in instance.disks]
3070

    
3071
    # copy over size, mode and name
3072
    for parent, child in zip(old_disks, new_disks):
3073
      child.size = parent.size
3074
      child.mode = parent.mode
3075
      child.name = parent.name
3076

    
3077
    # this is a DRBD disk, return its port to the pool
3078
    # NOTE: this must be done right before the call to cfg.Update!
3079
    for disk in old_disks:
3080
      tcp_port = disk.logical_id[2]
3081
      self.cfg.AddTcpUdpPort(tcp_port)
3082

    
3083
    # update instance structure
3084
    instance.disks = new_disks
3085
    instance.disk_template = constants.DT_PLAIN
3086
    _UpdateIvNames(0, instance.disks)
3087
    self.cfg.Update(instance, feedback_fn)
3088

    
3089
    # Release locks in case removing disks takes a while
3090
    ReleaseLocks(self, locking.LEVEL_NODE)
3091

    
3092
    feedback_fn("Removing volumes on the secondary node...")
3093
    for disk in old_disks:
3094
      self.cfg.SetDiskID(disk, snode)
3095
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3096
      if msg:
3097
        self.LogWarning("Could not remove block device %s on node %s,"
3098
                        " continuing anyway: %s", disk.iv_name, snode, msg)
3099

    
3100
    feedback_fn("Removing unneeded volumes on the primary node...")
3101
    for idx, disk in enumerate(old_disks):
3102
      meta = disk.children[1]
3103
      self.cfg.SetDiskID(meta, pnode)
3104
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3105
      if msg:
3106
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
3107
                        " continuing anyway: %s", idx, pnode, msg)
3108

    
3109
  def _CreateNewDisk(self, idx, params, _):
3110
    """Creates a new disk.
3111

3112
    """
3113
    instance = self.instance
3114

    
3115
    # add a new disk
3116
    if instance.disk_template in constants.DTS_FILEBASED:
3117
      (file_driver, file_path) = instance.disks[0].logical_id
3118
      file_path = os.path.dirname(file_path)
3119
    else:
3120
      file_driver = file_path = None
3121

    
3122
    disk = \
3123
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
3124
                           instance.primary_node, instance.secondary_nodes,
3125
                           [params], file_path, file_driver, idx,
3126
                           self.Log, self.diskparams)[0]
3127

    
3128
    new_disks = CreateDisks(self, instance, disks=[disk])
3129

    
3130
    if self.cluster.prealloc_wipe_disks:
3131
      # Wipe new disk
3132
      WipeOrCleanupDisks(self, instance,
3133
                         disks=[(idx, disk, 0)],
3134
                         cleanup=new_disks)
3135

    
3136
    return (disk, [
3137
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3138
      ])
3139

    
3140
  @staticmethod
3141
  def _ModifyDisk(idx, disk, params, _):
3142
    """Modifies a disk.
3143

3144
    """
3145
    changes = []
3146
    mode = params.get(constants.IDISK_MODE, None)
3147
    if mode:
3148
      disk.mode = mode
3149
      changes.append(("disk.mode/%d" % idx, disk.mode))
3150

    
3151
    name = params.get(constants.IDISK_NAME, None)
3152
    disk.name = name
3153
    changes.append(("disk.name/%d" % idx, disk.name))
3154

    
3155
    return changes
3156

    
3157
  def _RemoveDisk(self, idx, root, _):
3158
    """Removes a disk.
3159

3160
    """
3161
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3162
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3163
      self.cfg.SetDiskID(disk, node)
3164
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3165
      if msg:
3166
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3167
                        " continuing anyway", idx, node, msg)
3168

    
3169
    # if this is a DRBD disk, return its port to the pool
3170
    if root.dev_type in constants.LDS_DRBD:
3171
      self.cfg.AddTcpUdpPort(root.logical_id[2])
3172

    
3173
  def _CreateNewNic(self, idx, params, private):
3174
    """Creates data structure for a new network interface.
3175

3176
    """
3177
    mac = params[constants.INIC_MAC]
3178
    ip = params.get(constants.INIC_IP, None)
3179
    net = params.get(constants.INIC_NETWORK, None)
3180
    name = params.get(constants.INIC_NAME, None)
3181
    net_uuid = self.cfg.LookupNetwork(net)
3182
    #TODO: not private.filled?? can a nic have no nicparams??
3183
    nicparams = private.filled
3184
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3185
                       nicparams=nicparams)
3186
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3187

    
3188
    return (nobj, [
3189
      ("nic.%d" % idx,
3190
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3191
       (mac, ip, private.filled[constants.NIC_MODE],
3192
       private.filled[constants.NIC_LINK],
3193
       net)),
3194
      ])
3195

    
3196
  def _ApplyNicMods(self, idx, nic, params, private):
3197
    """Modifies a network interface.
3198

3199
    """
3200
    changes = []
3201

    
3202
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3203
      if key in params:
3204
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3205
        setattr(nic, key, params[key])
3206

    
3207
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3208
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3209
    if new_net_uuid != nic.network:
3210
      changes.append(("nic.network/%d" % idx, new_net))
3211
      nic.network = new_net_uuid
3212

    
3213
    if private.filled:
3214
      nic.nicparams = private.filled
3215

    
3216
      for (key, val) in nic.nicparams.items():
3217
        changes.append(("nic.%s/%d" % (key, idx), val))
3218

    
3219
    return changes
3220

    
3221
  def Exec(self, feedback_fn):
3222
    """Modifies an instance.
3223

3224
    Most parameters take effect only at the next restart of the instance;
    runtime memory is changed immediately.
3225

3226
    """
3227
    # Process here the warnings from CheckPrereq, as we don't have a
3228
    # feedback_fn there.
3229
    # TODO: Replace with self.LogWarning
3230
    for warn in self.warn:
3231
      feedback_fn("WARNING: %s" % warn)
3232

    
3233
    assert ((self.op.disk_template is None) ^
3234
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3235
      "Not owning any node resource locks"
3236

    
3237
    result = []
3238
    instance = self.instance
3239

    
3240
    # New primary node
3241
    if self.op.pnode:
3242
      instance.primary_node = self.op.pnode
3243

    
3244
    # runtime memory
3245
    if self.op.runtime_mem:
3246
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3247
                                                     instance,
3248
                                                     self.op.runtime_mem)
3249
      rpcres.Raise("Cannot modify instance runtime memory")
3250
      result.append(("runtime_memory", self.op.runtime_mem))
3251

    
3252
    # Apply disk changes
3253
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3254
                        self._CreateNewDisk, self._ModifyDisk,
3255
                        self._RemoveDisk)
3256
    _UpdateIvNames(0, instance.disks)
3257

    
3258
    if self.op.disk_template:
3259
      if __debug__:
3260
        check_nodes = set(instance.all_nodes)
3261
        if self.op.remote_node:
3262
          check_nodes.add(self.op.remote_node)
3263
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3264
          owned = self.owned_locks(level)
3265
          assert not (check_nodes - owned), \
3266
            ("Not owning the correct locks, owning %r, expected at least %r" %
3267
             (owned, check_nodes))
3268

    
3269
      r_shut = ShutdownInstanceDisks(self, instance)
3270
      if not r_shut:
3271
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3272
                                 " proceed with disk template conversion")
3273
      mode = (instance.disk_template, self.op.disk_template)
3274
      try:
3275
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
3276
      except:
3277
        self.cfg.ReleaseDRBDMinors(instance.name)
3278
        raise
3279
      result.append(("disk_template", self.op.disk_template))
3280

    
3281
      assert instance.disk_template == self.op.disk_template, \
3282
        ("Expected disk template '%s', found '%s'" %
3283
         (self.op.disk_template, instance.disk_template))
3284

    
3285
    # Release node and resource locks if there are any (they might already have
3286
    # been released during disk conversion)
3287
    ReleaseLocks(self, locking.LEVEL_NODE)
3288
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
3289

    
3290
    # Apply NIC changes
3291
    if self._new_nics is not None:
3292
      instance.nics = self._new_nics
3293
      result.extend(self._nic_chgdesc)
3294

    
3295
    # hvparams changes
3296
    if self.op.hvparams:
3297
      instance.hvparams = self.hv_inst
3298
      for key, val in self.op.hvparams.iteritems():
3299
        result.append(("hv/%s" % key, val))
3300

    
3301
    # beparams changes
3302
    if self.op.beparams:
3303
      instance.beparams = self.be_inst
3304
      for key, val in self.op.beparams.iteritems():
3305
        result.append(("be/%s" % key, val))
3306

    
3307
    # OS change
3308
    if self.op.os_name:
3309
      instance.os = self.op.os_name
3310

    
3311
    # osparams changes
3312
    if self.op.osparams:
3313
      instance.osparams = self.os_inst
3314
      for key, val in self.op.osparams.iteritems():
3315
        result.append(("os/%s" % key, val))
3316

    
3317
    if self.op.offline is None:
3318
      # Ignore
3319
      pass
3320
    elif self.op.offline:
3321
      # Mark instance as offline
3322
      self.cfg.MarkInstanceOffline(instance.name)
3323
      result.append(("admin_state", constants.ADMINST_OFFLINE))
3324
    else:
3325
      # Mark instance as online, but stopped
3326
      self.cfg.MarkInstanceDown(instance.name)
3327
      result.append(("admin_state", constants.ADMINST_DOWN))
3328

    
3329
    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3330

    
3331
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3332
                self.owned_locks(locking.LEVEL_NODE)), \
3333
      "All node locks should have been released by now"
3334

    
3335
    return result
3336

    
3337
  _DISK_CONVERSIONS = {
3338
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3339
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3340
    }
3341

    
3342

    
3343
class LUInstanceChangeGroup(LogicalUnit):
3344
  HPATH = "instance-change-group"
3345
  HTYPE = constants.HTYPE_INSTANCE
3346
  REQ_BGL = False
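  # This LU does not move the instance itself: it asks the instance allocator
  # for a placement in one of the target groups and returns the resulting
  # jobs (see Exec below).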
3347

    
3348
  def ExpandNames(self):
3349
    self.share_locks = ShareAll()
3350

    
3351
    self.needed_locks = {
3352
      locking.LEVEL_NODEGROUP: [],
3353
      locking.LEVEL_NODE: [],
3354
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3355
      }
3356

    
3357
    self._ExpandAndLockInstance()
3358

    
3359
    if self.op.target_groups:
3360
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3361
                                  self.op.target_groups)
3362
    else:
3363
      self.req_target_uuids = None
3364

    
3365
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3366

    
3367
  def DeclareLocks(self, level):
3368
    if level == locking.LEVEL_NODEGROUP:
3369
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3370

    
3371
      if self.req_target_uuids:
3372
        lock_groups = set(self.req_target_uuids)
3373

    
3374
        # Lock all groups used by the instance optimistically; this requires
3375
        # going via the node before it is locked, so the result has to be
        # verified again later on
3376
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3377
        lock_groups.update(instance_groups)
3378
      else:
3379
        # No target groups, need to lock all of them
3380
        lock_groups = locking.ALL_SET
3381

    
3382
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3383

    
3384
    elif level == locking.LEVEL_NODE:
3385
      if self.req_target_uuids:
3386
        # Lock all nodes used by instances
3387
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3388
        self._LockInstancesNodes()
3389

    
3390
        # Lock all nodes in all potential target groups
3391
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3392
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3393
        member_nodes = [node_name
3394
                        for group in lock_groups
3395
                        for node_name in self.cfg.GetNodeGroup(group).members]
3396
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3397
      else:
3398
        # Lock all nodes as all groups are potential targets
3399
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3400

    
3401
  def CheckPrereq(self):
3402
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3403
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3404
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3405

    
3406
    assert (self.req_target_uuids is None or
3407
            owned_groups.issuperset(self.req_target_uuids))
3408
    assert owned_instances == set([self.op.instance_name])
3409

    
3410
    # Get instance information
3411
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3412

    
3413
    # Check if node groups for locked instance are still correct
3414
    assert owned_nodes.issuperset(self.instance.all_nodes), \
3415
      ("Instance %s's nodes changed while we kept the lock" %
3416
       self.op.instance_name)
3417

    
3418
    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3419
                                          owned_groups)
3420

    
3421
    if self.req_target_uuids:
3422
      # User requested specific target groups
3423
      self.target_uuids = frozenset(self.req_target_uuids)
3424
    else:
3425
      # All groups except those used by the instance are potential targets
3426
      self.target_uuids = owned_groups - inst_groups
3427

    
3428
    conflicting_groups = self.target_uuids & inst_groups
3429
    if conflicting_groups:
3430
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3431
                                 " used by the instance '%s'" %
3432
                                 (utils.CommaJoin(conflicting_groups),
3433
                                  self.op.instance_name),
3434
                                 errors.ECODE_INVAL)
3435

    
3436
    if not self.target_uuids:
3437
      raise errors.OpPrereqError("There are no possible target groups",
3438
                                 errors.ECODE_INVAL)
3439

    
3440
  def BuildHooksEnv(self):
3441
    """Build hooks env.
3442

3443
    """
3444
    assert self.target_uuids
3445

    
3446
    env = {
3447
      "TARGET_GROUPS": " ".join(self.target_uuids),
3448
      }
3449

    
3450
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
3451

    
3452
    return env
3453

    
3454
  def BuildHooksNodes(self):
3455
    """Build hooks nodes.
3456

3457
    """
3458
    mn = self.cfg.GetMasterNode()
3459
    return ([mn], [mn])
3460

    
3461
  def Exec(self, feedback_fn):
3462
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3463

    
3464
    assert instances == [self.op.instance_name], "Instance not locked"
3465

    
3466
    req = iallocator.IAReqGroupChange(instances=instances,
3467
                                      target_groups=list(self.target_uuids))
3468
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3469

    
3470
    ial.Run(self.op.iallocator)
3471

    
3472
    if not ial.success:
3473
      raise errors.OpPrereqError("Can't compute solution for changing group of"
3474
                                 " instance '%s' using iallocator '%s': %s" %
3475
                                 (self.op.instance_name, self.op.iallocator,
3476
                                  ial.info), errors.ECODE_NORES)
3477

    
3478
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3479

    
3480
    self.LogInfo("Iallocator returned %s job(s) for changing group of"
3481
                 " instance '%s'", len(jobs), self.op.instance_name)
3482

    
3483
    return ResultWithJobs(jobs)