Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / instance.py @ 402df383

History | View | Annotate | Download (134.1 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with instances."""
23

    
24
import OpenSSL
25
import copy
26
import logging
27
import os
28

    
29
from ganeti import compat
30
from ganeti import constants
31
from ganeti import errors
32
from ganeti import ht
33
from ganeti import hypervisor
34
from ganeti import locking
35
from ganeti.masterd import iallocator
36
from ganeti import masterd
37
from ganeti import netutils
38
from ganeti import objects
39
from ganeti import opcodes
40
from ganeti import pathutils
41
from ganeti import rpc
42
from ganeti import utils
43

    
44
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45

    
46
from ganeti.cmdlib.common import INSTANCE_DOWN, \
47
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
53
from ganeti.cmdlib.instance_storage import CreateDisks, \
54
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
56
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
58
  CheckSpindlesExclusiveStorage
59
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
60
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
61
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
62
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
63
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
64
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
65

    
66
import ganeti.masterd.instance
67

    
68

    
69
#: Type description for changes as returned by L{_ApplyContainerMods}'s
70
#: callbacks
71
_TApplyContModsCbChanges = \
72
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
73
    ht.TNonEmptyString,
74
    ht.TAny,
75
    ])))
76

    
77

    
78
def _CheckHostnameSane(lu, name):
79
  """Ensures that a given hostname resolves to a 'sane' name.
80

81
  The given name is required to be a prefix of the resolved hostname,
82
  to prevent accidental mismatches.
83

84
  @param lu: the logical unit on behalf of which we're checking
85
  @param name: the name we should resolve and check
86
  @return: the resolved hostname object
87

88
  """
89
  hostname = netutils.GetHostname(name=name)
90
  if hostname.name != name:
91
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
92
  if not utils.MatchNameComponent(name, [hostname.name]):
93
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
94
                                " same as given hostname '%s'") %
95
                               (hostname.name, name), errors.ECODE_INVAL)
96
  return hostname
97

    
98

    
99
def _CheckOpportunisticLocking(op):
100
  """Generate error if opportunistic locking is not possible.
101

102
  """
103
  if op.opportunistic_locking and not op.iallocator:
104
    raise errors.OpPrereqError("Opportunistic locking is only available in"
105
                               " combination with an instance allocator",
106
                               errors.ECODE_INVAL)
107

    
108

    
109
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
110
  """Wrapper around IAReqInstanceAlloc.
111

112
  @param op: The instance opcode
113
  @param disks: The computed disks
114
  @param nics: The computed nics
115
  @param beparams: The full filled beparams
116
  @param node_whitelist: List of nodes which should appear as online to the
117
    allocator (unless the node is already marked offline)
118

119
  @returns: A filled L{iallocator.IAReqInstanceAlloc}
120

121
  """
122
  spindle_use = beparams[constants.BE_SPINDLE_USE]
123
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
124
                                       disk_template=op.disk_template,
125
                                       tags=op.tags,
126
                                       os=op.os_type,
127
                                       vcpus=beparams[constants.BE_VCPUS],
128
                                       memory=beparams[constants.BE_MAXMEM],
129
                                       spindle_use=spindle_use,
130
                                       disks=disks,
131
                                       nics=[n.ToDict() for n in nics],
132
                                       hypervisor=op.hypervisor,
133
                                       node_whitelist=node_whitelist)
134

    
135

    
136
def _ComputeFullBeParams(op, cluster):
137
  """Computes the full beparams.
138

139
  @param op: The instance opcode
140
  @param cluster: The cluster config object
141

142
  @return: The fully filled beparams
143

144
  """
145
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
146
  for param, value in op.beparams.iteritems():
147
    if value == constants.VALUE_AUTO:
148
      op.beparams[param] = default_beparams[param]
149
  objects.UpgradeBeParams(op.beparams)
150
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
151
  return cluster.SimpleFillBE(op.beparams)
152

    
153

    
154
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
155
  """Computes the nics.
156

157
  @param op: The instance opcode
158
  @param cluster: Cluster configuration object
159
  @param default_ip: The default ip to assign
160
  @param cfg: An instance of the configuration object
161
  @param ec_id: Execution context ID
162

163
  @returns: The build up nics
164

165
  """
166
  nics = []
167
  for nic in op.nics:
168
    nic_mode_req = nic.get(constants.INIC_MODE, None)
169
    nic_mode = nic_mode_req
170
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
171
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
172

    
173
    net = nic.get(constants.INIC_NETWORK, None)
174
    link = nic.get(constants.NIC_LINK, None)
175
    ip = nic.get(constants.INIC_IP, None)
176

    
177
    if net is None or net.lower() == constants.VALUE_NONE:
178
      net = None
179
    else:
180
      if nic_mode_req is not None or link is not None:
181
        raise errors.OpPrereqError("If network is given, no mode or link"
182
                                   " is allowed to be passed",
183
                                   errors.ECODE_INVAL)
184

    
185
    # ip validity checks
186
    if ip is None or ip.lower() == constants.VALUE_NONE:
187
      nic_ip = None
188
    elif ip.lower() == constants.VALUE_AUTO:
189
      if not op.name_check:
190
        raise errors.OpPrereqError("IP address set to auto but name checks"
191
                                   " have been skipped",
192
                                   errors.ECODE_INVAL)
193
      nic_ip = default_ip
194
    else:
195
      # We defer pool operations until later, so that the iallocator has
196
      # filled in the instance's node(s) dimara
197
      if ip.lower() == constants.NIC_IP_POOL:
198
        if net is None:
199
          raise errors.OpPrereqError("if ip=pool, parameter network"
200
                                     " must be passed too",
201
                                     errors.ECODE_INVAL)
202

    
203
      elif not netutils.IPAddress.IsValid(ip):
204
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
205
                                   errors.ECODE_INVAL)
206

    
207
      nic_ip = ip
208

    
209
    # TODO: check the ip address for uniqueness
210
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
211
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
212
                                 errors.ECODE_INVAL)
213

    
214
    # MAC address verification
215
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
216
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
217
      mac = utils.NormalizeAndValidateMac(mac)
218

    
219
      try:
220
        # TODO: We need to factor this out
221
        cfg.ReserveMAC(mac, ec_id)
222
      except errors.ReservationError:
223
        raise errors.OpPrereqError("MAC address %s already in use"
224
                                   " in cluster" % mac,
225
                                   errors.ECODE_NOTUNIQUE)
226

    
227
    #  Build nic parameters
228
    nicparams = {}
229
    if nic_mode_req:
230
      nicparams[constants.NIC_MODE] = nic_mode
231
    if link:
232
      nicparams[constants.NIC_LINK] = link
233

    
234
    check_params = cluster.SimpleFillNIC(nicparams)
235
    objects.NIC.CheckParameterSyntax(check_params)
236
    net_uuid = cfg.LookupNetwork(net)
237
    name = nic.get(constants.INIC_NAME, None)
238
    if name is not None and name.lower() == constants.VALUE_NONE:
239
      name = None
240
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
241
                          network=net_uuid, nicparams=nicparams)
242
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
243
    nics.append(nic_obj)
244

    
245
  return nics
246

    
247

    
248
def _CheckForConflictingIp(lu, ip, node):
249
  """In case of conflicting IP address raise error.
250

251
  @type ip: string
252
  @param ip: IP address
253
  @type node: string
254
  @param node: node name
255

256
  """
257
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
258
  if conf_net is not None:
259
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
260
                                " network %s, but the target NIC does not." %
261
                                (ip, conf_net)),
262
                               errors.ECODE_STATE)
263

    
264
  return (None, None)
265

    
266

    
267
def _ComputeIPolicyInstanceSpecViolation(
268
  ipolicy, instance_spec, disk_template,
269
  _compute_fn=ComputeIPolicySpecViolation):
270
  """Compute if instance specs meets the specs of ipolicy.
271

272
  @type ipolicy: dict
273
  @param ipolicy: The ipolicy to verify against
274
  @param instance_spec: dict
275
  @param instance_spec: The instance spec to verify
276
  @type disk_template: string
277
  @param disk_template: the disk template of the instance
278
  @param _compute_fn: The function to verify ipolicy (unittest only)
279
  @see: L{ComputeIPolicySpecViolation}
280

281
  """
282
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
283
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
284
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
285
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
286
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
287
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
288

    
289
  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
290
                     disk_sizes, spindle_use, disk_template)
291

    
292

    
293
def _CheckOSVariant(os_obj, name):
294
  """Check whether an OS name conforms to the os variants specification.
295

296
  @type os_obj: L{objects.OS}
297
  @param os_obj: OS object to check
298
  @type name: string
299
  @param name: OS name passed by the user, to check for validity
300

301
  """
302
  variant = objects.OS.GetVariant(name)
303
  if not os_obj.supported_variants:
304
    if variant:
305
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
306
                                 " passed)" % (os_obj.name, variant),
307
                                 errors.ECODE_INVAL)
308
    return
309
  if not variant:
310
    raise errors.OpPrereqError("OS name must include a variant",
311
                               errors.ECODE_INVAL)
312

    
313
  if variant not in os_obj.supported_variants:
314
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
315

    
316

    
317
class LUInstanceCreate(LogicalUnit):
318
  """Create an instance.
319

320
  """
321
  HPATH = "instance-add"
322
  HTYPE = constants.HTYPE_INSTANCE
323
  REQ_BGL = False
324

    
325
  def CheckArguments(self):
326
    """Check arguments.
327

328
    """
329
    # do not require name_check to ease forward/backward compatibility
330
    # for tools
331
    if self.op.no_install and self.op.start:
332
      self.LogInfo("No-installation mode selected, disabling startup")
333
      self.op.start = False
334
    # validate/normalize the instance name
335
    self.op.instance_name = \
336
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
337

    
338
    if self.op.ip_check and not self.op.name_check:
339
      # TODO: make the ip check more flexible and not depend on the name check
340
      raise errors.OpPrereqError("Cannot do IP address check without a name"
341
                                 " check", errors.ECODE_INVAL)
342

    
343
    # check nics' parameter names
344
    for nic in self.op.nics:
345
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
346
    # check that NIC's parameters names are unique and valid
347
    utils.ValidateDeviceNames("NIC", self.op.nics)
348

    
349
    # check that disk's names are unique and valid
350
    utils.ValidateDeviceNames("disk", self.op.disks)
351

    
352
    cluster = self.cfg.GetClusterInfo()
353
    if not self.op.disk_template in cluster.enabled_disk_templates:
354
      raise errors.OpPrereqError("Cannot create an instance with disk template"
355
                                 " '%s', because it is not enabled in the"
356
                                 " cluster. Enabled disk templates are: %s." %
357
                                 (self.op.disk_template,
358
                                  ",".join(cluster.enabled_disk_templates)))
359

    
360
    # check disks. parameter names and consistent adopt/no-adopt strategy
361
    has_adopt = has_no_adopt = False
362
    for disk in self.op.disks:
363
      if self.op.disk_template != constants.DT_EXT:
364
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
365
      if constants.IDISK_ADOPT in disk:
366
        has_adopt = True
367
      else:
368
        has_no_adopt = True
369
    if has_adopt and has_no_adopt:
370
      raise errors.OpPrereqError("Either all disks are adopted or none is",
371
                                 errors.ECODE_INVAL)
372
    if has_adopt:
373
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
374
        raise errors.OpPrereqError("Disk adoption is not supported for the"
375
                                   " '%s' disk template" %
376
                                   self.op.disk_template,
377
                                   errors.ECODE_INVAL)
378
      if self.op.iallocator is not None:
379
        raise errors.OpPrereqError("Disk adoption not allowed with an"
380
                                   " iallocator script", errors.ECODE_INVAL)
381
      if self.op.mode == constants.INSTANCE_IMPORT:
382
        raise errors.OpPrereqError("Disk adoption not allowed for"
383
                                   " instance import", errors.ECODE_INVAL)
384
    else:
385
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
386
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
387
                                   " but no 'adopt' parameter given" %
388
                                   self.op.disk_template,
389
                                   errors.ECODE_INVAL)
390

    
391
    self.adopt_disks = has_adopt
392

    
393
    # instance name verification
394
    if self.op.name_check:
395
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
396
      self.op.instance_name = self.hostname1.name
397
      # used in CheckPrereq for ip ping check
398
      self.check_ip = self.hostname1.ip
399
    else:
400
      self.check_ip = None
401

    
402
    # file storage checks
403
    if (self.op.file_driver and
404
        not self.op.file_driver in constants.FILE_DRIVER):
405
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
406
                                 self.op.file_driver, errors.ECODE_INVAL)
407

    
408
    if self.op.disk_template == constants.DT_FILE:
409
      opcodes.RequireFileStorage()
410
    elif self.op.disk_template == constants.DT_SHARED_FILE:
411
      opcodes.RequireSharedFileStorage()
412

    
413
    ### Node/iallocator related checks
414
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
415

    
416
    if self.op.pnode is not None:
417
      if self.op.disk_template in constants.DTS_INT_MIRROR:
418
        if self.op.snode is None:
419
          raise errors.OpPrereqError("The networked disk templates need"
420
                                     " a mirror node", errors.ECODE_INVAL)
421
      elif self.op.snode:
422
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
423
                        " template")
424
        self.op.snode = None
425

    
426
    _CheckOpportunisticLocking(self.op)
427

    
428
    self._cds = GetClusterDomainSecret()
429

    
430
    if self.op.mode == constants.INSTANCE_IMPORT:
431
      # On import force_variant must be True, because if we forced it at
432
      # initial install, our only chance when importing it back is that it
433
      # works again!
434
      self.op.force_variant = True
435

    
436
      if self.op.no_install:
437
        self.LogInfo("No-installation mode has no effect during import")
438

    
439
    elif self.op.mode == constants.INSTANCE_CREATE:
440
      if self.op.os_type is None:
441
        raise errors.OpPrereqError("No guest OS specified",
442
                                   errors.ECODE_INVAL)
443
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
444
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
445
                                   " installation" % self.op.os_type,
446
                                   errors.ECODE_STATE)
447
      if self.op.disk_template is None:
448
        raise errors.OpPrereqError("No disk template specified",
449
                                   errors.ECODE_INVAL)
450

    
451
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
452
      # Check handshake to ensure both clusters have the same domain secret
453
      src_handshake = self.op.source_handshake
454
      if not src_handshake:
455
        raise errors.OpPrereqError("Missing source handshake",
456
                                   errors.ECODE_INVAL)
457

    
458
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
459
                                                           src_handshake)
460
      if errmsg:
461
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
462
                                   errors.ECODE_INVAL)
463

    
464
      # Load and check source CA
465
      self.source_x509_ca_pem = self.op.source_x509_ca
466
      if not self.source_x509_ca_pem:
467
        raise errors.OpPrereqError("Missing source X509 CA",
468
                                   errors.ECODE_INVAL)
469

    
470
      try:
471
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
472
                                                    self._cds)
473
      except OpenSSL.crypto.Error, err:
474
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
475
                                   (err, ), errors.ECODE_INVAL)
476

    
477
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
478
      if errcode is not None:
479
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
480
                                   errors.ECODE_INVAL)
481

    
482
      self.source_x509_ca = cert
483

    
484
      src_instance_name = self.op.source_instance_name
485
      if not src_instance_name:
486
        raise errors.OpPrereqError("Missing source instance name",
487
                                   errors.ECODE_INVAL)
488

    
489
      self.source_instance_name = \
490
        netutils.GetHostname(name=src_instance_name).name
491

    
492
    else:
493
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
494
                                 self.op.mode, errors.ECODE_INVAL)
495

    
496
  def ExpandNames(self):
497
    """ExpandNames for CreateInstance.
498

499
    Figure out the right locks for instance creation.
500

501
    """
502
    self.needed_locks = {}
503

    
504
    instance_name = self.op.instance_name
505
    # this is just a preventive check, but someone might still add this
506
    # instance in the meantime, and creation will fail at lock-add time
507
    if instance_name in self.cfg.GetInstanceList():
508
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
509
                                 instance_name, errors.ECODE_EXISTS)
510

    
511
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
512

    
513
    if self.op.iallocator:
514
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
515
      # specifying a group on instance creation and then selecting nodes from
516
      # that group
517
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
518
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
519

    
520
      if self.op.opportunistic_locking:
521
        self.opportunistic_locks[locking.LEVEL_NODE] = True
522
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
523
    else:
524
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
525
      nodelist = [self.op.pnode]
526
      if self.op.snode is not None:
527
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
528
        nodelist.append(self.op.snode)
529
      self.needed_locks[locking.LEVEL_NODE] = nodelist
530

    
531
    # in case of import lock the source node too
532
    if self.op.mode == constants.INSTANCE_IMPORT:
533
      src_node = self.op.src_node
534
      src_path = self.op.src_path
535

    
536
      if src_path is None:
537
        self.op.src_path = src_path = self.op.instance_name
538

    
539
      if src_node is None:
540
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
541
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
542
        self.op.src_node = None
543
        if os.path.isabs(src_path):
544
          raise errors.OpPrereqError("Importing an instance from a path"
545
                                     " requires a source node option",
546
                                     errors.ECODE_INVAL)
547
      else:
548
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
549
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
550
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
551
        if not os.path.isabs(src_path):
552
          self.op.src_path = src_path = \
553
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
554

    
555
    self.needed_locks[locking.LEVEL_NODE_RES] = \
556
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
557

    
558
  def _RunAllocator(self):
559
    """Run the allocator based on input opcode.
560

561
    """
562
    if self.op.opportunistic_locking:
563
      # Only consider nodes for which a lock is held
564
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
565
    else:
566
      node_whitelist = None
567

    
568
    #TODO Export network to iallocator so that it chooses a pnode
569
    #     in a nodegroup that has the desired network connected to
570
    req = _CreateInstanceAllocRequest(self.op, self.disks,
571
                                      self.nics, self.be_full,
572
                                      node_whitelist)
573
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
574

    
575
    ial.Run(self.op.iallocator)
576

    
577
    if not ial.success:
578
      # When opportunistic locks are used only a temporary failure is generated
579
      if self.op.opportunistic_locking:
580
        ecode = errors.ECODE_TEMP_NORES
581
      else:
582
        ecode = errors.ECODE_NORES
583

    
584
      raise errors.OpPrereqError("Can't compute nodes using"
585
                                 " iallocator '%s': %s" %
586
                                 (self.op.iallocator, ial.info),
587
                                 ecode)
588

    
589
    self.op.pnode = ial.result[0]
590
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
591
                 self.op.instance_name, self.op.iallocator,
592
                 utils.CommaJoin(ial.result))
593

    
594
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
595

    
596
    if req.RequiredNodes() == 2:
597
      self.op.snode = ial.result[1]
598

    
599
  def BuildHooksEnv(self):
600
    """Build hooks env.
601

602
    This runs on master, primary and secondary nodes of the instance.
603

604
    """
605
    env = {
606
      "ADD_MODE": self.op.mode,
607
      }
608
    if self.op.mode == constants.INSTANCE_IMPORT:
609
      env["SRC_NODE"] = self.op.src_node
610
      env["SRC_PATH"] = self.op.src_path
611
      env["SRC_IMAGES"] = self.src_images
612

    
613
    env.update(BuildInstanceHookEnv(
614
      name=self.op.instance_name,
615
      primary_node=self.op.pnode,
616
      secondary_nodes=self.secondaries,
617
      status=self.op.start,
618
      os_type=self.op.os_type,
619
      minmem=self.be_full[constants.BE_MINMEM],
620
      maxmem=self.be_full[constants.BE_MAXMEM],
621
      vcpus=self.be_full[constants.BE_VCPUS],
622
      nics=NICListToTuple(self, self.nics),
623
      disk_template=self.op.disk_template,
624
      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
625
              d[constants.IDISK_MODE]) for d in self.disks],
626
      bep=self.be_full,
627
      hvp=self.hv_full,
628
      hypervisor_name=self.op.hypervisor,
629
      tags=self.op.tags,
630
      ))
631

    
632
    return env
633

    
634
  def BuildHooksNodes(self):
635
    """Build hooks nodes.
636

637
    """
638
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
639
    return nl, nl
640

    
641
  def _ReadExportInfo(self):
642
    """Reads the export information from disk.
643

644
    It will override the opcode source node and path with the actual
645
    information, if these two were not specified before.
646

647
    @return: the export information
648

649
    """
650
    assert self.op.mode == constants.INSTANCE_IMPORT
651

    
652
    src_node = self.op.src_node
653
    src_path = self.op.src_path
654

    
655
    if src_node is None:
656
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
657
      exp_list = self.rpc.call_export_list(locked_nodes)
658
      found = False
659
      for node in exp_list:
660
        if exp_list[node].fail_msg:
661
          continue
662
        if src_path in exp_list[node].payload:
663
          found = True
664
          self.op.src_node = src_node = node
665
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
666
                                                       src_path)
667
          break
668
      if not found:
669
        raise errors.OpPrereqError("No export found for relative path %s" %
670
                                   src_path, errors.ECODE_INVAL)
671

    
672
    CheckNodeOnline(self, src_node)
673
    result = self.rpc.call_export_info(src_node, src_path)
674
    result.Raise("No export or invalid export found in dir %s" % src_path)
675

    
676
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
677
    if not export_info.has_section(constants.INISECT_EXP):
678
      raise errors.ProgrammerError("Corrupted export config",
679
                                   errors.ECODE_ENVIRON)
680

    
681
    ei_version = export_info.get(constants.INISECT_EXP, "version")
682
    if (int(ei_version) != constants.EXPORT_VERSION):
683
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
684
                                 (ei_version, constants.EXPORT_VERSION),
685
                                 errors.ECODE_ENVIRON)
686
    return export_info
687

    
688
  def _ReadExportParams(self, einfo):
689
    """Use export parameters as defaults.
690

691
    In case the opcode doesn't specify (as in override) some instance
692
    parameters, then try to use them from the export information, if
693
    that declares them.
694

695
    """
696
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
697

    
698
    if self.op.disk_template is None:
699
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
700
        self.op.disk_template = einfo.get(constants.INISECT_INS,
701
                                          "disk_template")
702
        if self.op.disk_template not in constants.DISK_TEMPLATES:
703
          raise errors.OpPrereqError("Disk template specified in configuration"
704
                                     " file is not one of the allowed values:"
705
                                     " %s" %
706
                                     " ".join(constants.DISK_TEMPLATES),
707
                                     errors.ECODE_INVAL)
708
      else:
709
        raise errors.OpPrereqError("No disk template specified and the export"
710
                                   " is missing the disk_template information",
711
                                   errors.ECODE_INVAL)
712

    
713
    if not self.op.disks:
714
      disks = []
715
      # TODO: import the disk iv_name too
716
      for idx in range(constants.MAX_DISKS):
717
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
718
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
719
          disks.append({constants.IDISK_SIZE: disk_sz})
720
      self.op.disks = disks
721
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
722
        raise errors.OpPrereqError("No disk info specified and the export"
723
                                   " is missing the disk information",
724
                                   errors.ECODE_INVAL)
725

    
726
    if not self.op.nics:
727
      nics = []
728
      for idx in range(constants.MAX_NICS):
729
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
730
          ndict = {}
731
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
732
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
733
            ndict[name] = v
734
          nics.append(ndict)
735
        else:
736
          break
737
      self.op.nics = nics
738

    
739
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
740
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
741

    
742
    if (self.op.hypervisor is None and
743
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
744
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
745

    
746
    if einfo.has_section(constants.INISECT_HYP):
747
      # use the export parameters but do not override the ones
748
      # specified by the user
749
      for name, value in einfo.items(constants.INISECT_HYP):
750
        if name not in self.op.hvparams:
751
          self.op.hvparams[name] = value
752

    
753
    if einfo.has_section(constants.INISECT_BEP):
754
      # use the parameters, without overriding
755
      for name, value in einfo.items(constants.INISECT_BEP):
756
        if name not in self.op.beparams:
757
          self.op.beparams[name] = value
758
        # Compatibility for the old "memory" be param
759
        if name == constants.BE_MEMORY:
760
          if constants.BE_MAXMEM not in self.op.beparams:
761
            self.op.beparams[constants.BE_MAXMEM] = value
762
          if constants.BE_MINMEM not in self.op.beparams:
763
            self.op.beparams[constants.BE_MINMEM] = value
764
    else:
765
      # try to read the parameters old style, from the main section
766
      for name in constants.BES_PARAMETERS:
767
        if (name not in self.op.beparams and
768
            einfo.has_option(constants.INISECT_INS, name)):
769
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
770

    
771
    if einfo.has_section(constants.INISECT_OSP):
772
      # use the parameters, without overriding
773
      for name, value in einfo.items(constants.INISECT_OSP):
774
        if name not in self.op.osparams:
775
          self.op.osparams[name] = value
776

    
777
  def _RevertToDefaults(self, cluster):
778
    """Revert the instance parameters to the default values.
779

780
    """
781
    # hvparams
782
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
783
    for name in self.op.hvparams.keys():
784
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
785
        del self.op.hvparams[name]
786
    # beparams
787
    be_defs = cluster.SimpleFillBE({})
788
    for name in self.op.beparams.keys():
789
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
790
        del self.op.beparams[name]
791
    # nic params
792
    nic_defs = cluster.SimpleFillNIC({})
793
    for nic in self.op.nics:
794
      for name in constants.NICS_PARAMETERS:
795
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
796
          del nic[name]
797
    # osparams
798
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
799
    for name in self.op.osparams.keys():
800
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
801
        del self.op.osparams[name]
802

    
803
  def _CalculateFileStorageDir(self):
804
    """Calculate final instance file storage dir.
805

806
    """
807
    # file storage dir calculation/check
808
    self.instance_file_storage_dir = None
809
    if self.op.disk_template in constants.DTS_FILEBASED:
810
      # build the full file storage dir path
811
      joinargs = []
812

    
813
      if self.op.disk_template == constants.DT_SHARED_FILE:
814
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
815
      else:
816
        get_fsd_fn = self.cfg.GetFileStorageDir
817

    
818
      cfg_storagedir = get_fsd_fn()
819
      if not cfg_storagedir:
820
        raise errors.OpPrereqError("Cluster file storage dir not defined",
821
                                   errors.ECODE_STATE)
822
      joinargs.append(cfg_storagedir)
823

    
824
      if self.op.file_storage_dir is not None:
825
        joinargs.append(self.op.file_storage_dir)
826

    
827
      joinargs.append(self.op.instance_name)
828

    
829
      # pylint: disable=W0142
830
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
831

    
832
  def CheckPrereq(self): # pylint: disable=R0914
833
    """Check prerequisites.
834

835
    """
836
    self._CalculateFileStorageDir()
837
    ec_id = self.proc.GetECId()
838

    
839
    if self.op.mode == constants.INSTANCE_IMPORT:
840
      export_info = self._ReadExportInfo()
841
      self._ReadExportParams(export_info)
842
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
843
    else:
844
      self._old_instance_name = None
845

    
846
    if (not self.cfg.GetVGName() and
847
        self.op.disk_template not in constants.DTS_NOT_LVM):
848
      raise errors.OpPrereqError("Cluster does not support lvm-based"
849
                                 " instances", errors.ECODE_STATE)
850

    
851
    if (self.op.hypervisor is None or
852
        self.op.hypervisor == constants.VALUE_AUTO):
853
      self.op.hypervisor = self.cfg.GetHypervisorType()
854

    
855
    cluster = self.cfg.GetClusterInfo()
856
    enabled_hvs = cluster.enabled_hypervisors
857
    if self.op.hypervisor not in enabled_hvs:
858
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
859
                                 " cluster (%s)" %
860
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
861
                                 errors.ECODE_STATE)
862

    
863
    # Check tag validity
864
    for tag in self.op.tags:
865
      objects.TaggableObject.ValidateTag(tag)
866

    
867
    # check hypervisor parameter syntax (locally)
868
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
869
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
870
                                      self.op.hvparams)
871
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
872
    hv_type.CheckParameterSyntax(filled_hvp)
873
    self.hv_full = filled_hvp
874
    # check that we don't specify global parameters on an instance
875
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
876
                         "instance", "cluster")
877

    
878
    # fill and remember the beparams dict
879
    self.be_full = _ComputeFullBeParams(self.op, cluster)
880

    
881
    # build os parameters
882
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
883

    
884
    # now that hvp/bep are in final format, let's reset to defaults,
885
    # if told to do so
886
    if self.op.identify_defaults:
887
      self._RevertToDefaults(cluster)
888

    
889
    # NIC buildup
890
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg, ec_id)
891

    
892
    # disk checks/pre-build
893
    default_vg = self.cfg.GetVGName()
894
    self.disks = ComputeDisks(self.op, default_vg)
895

    
896
    if self.op.mode == constants.INSTANCE_IMPORT:
897
      disk_images = []
898
      for idx in range(len(self.disks)):
899
        option = "disk%d_dump" % idx
900
        if export_info.has_option(constants.INISECT_INS, option):
901
          # FIXME: are the old os-es, disk sizes, etc. useful?
902
          export_name = export_info.get(constants.INISECT_INS, option)
903
          image = utils.PathJoin(self.op.src_path, export_name)
904
          disk_images.append(image)
905
        else:
906
          disk_images.append(False)
907

    
908
      self.src_images = disk_images
909

    
910
      if self.op.instance_name == self._old_instance_name:
911
        for idx, nic in enumerate(self.nics):
912
          if nic.mac == constants.VALUE_AUTO:
913
            nic_mac_ini = "nic%d_mac" % idx
914
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
915

    
916
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
917

    
918
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
919
    if self.op.ip_check:
920
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
921
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
922
                                   (self.check_ip, self.op.instance_name),
923
                                   errors.ECODE_NOTUNIQUE)
924

    
925
    #### mac address generation
926
    # By generating here the mac address both the allocator and the hooks get
927
    # the real final mac address rather than the 'auto' or 'generate' value.
928
    # There is a race condition between the generation and the instance object
929
    # creation, which means that we know the mac is valid now, but we're not
930
    # sure it will be when we actually add the instance. If things go bad
931
    # adding the instance will abort because of a duplicate mac, and the
932
    # creation job will fail.
933
    for nic in self.nics:
934
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
935
        nic.mac = self.cfg.GenerateMAC(nic.network, ec_id)
936

    
937
    #### allocator run
938

    
939
    if self.op.iallocator is not None:
940
      self._RunAllocator()
941

    
942
    # Release all unneeded node locks
943
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
944
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
945
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
946
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
947

    
948
    assert (self.owned_locks(locking.LEVEL_NODE) ==
949
            self.owned_locks(locking.LEVEL_NODE_RES)), \
950
      "Node locks differ from node resource locks"
951

    
952
    #### node related checks
953

    
954
    # check primary node
955
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
956
    assert self.pnode is not None, \
957
      "Cannot retrieve locked node %s" % self.op.pnode
958
    if pnode.offline:
959
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
960
                                 pnode.name, errors.ECODE_STATE)
961
    if pnode.drained:
962
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
963
                                 pnode.name, errors.ECODE_STATE)
964
    if not pnode.vm_capable:
965
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
966
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
967

    
968
    self.secondaries = []
969

    
970
    # Fill in any IPs from IP pools. This must happen here, because we need to
971
    # know the nic's primary node, as specified by the iallocator
972
    for idx, nic in enumerate(self.nics):
973
      net_uuid = nic.network
974
      if net_uuid is not None:
975
        nobj = self.cfg.GetNetwork(net_uuid)
976
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
977
        if netparams is None:
978
          raise errors.OpPrereqError("No netparams found for network"
979
                                     " %s. Propably not connected to"
980
                                     " node's %s nodegroup" %
981
                                     (nobj.name, self.pnode.name),
982
                                     errors.ECODE_INVAL)
983
        self.LogInfo("NIC/%d inherits netparams %s" %
984
                     (idx, netparams.values()))
985
        nic.nicparams = dict(netparams)
986
        if nic.ip is not None:
987
          if nic.ip.lower() == constants.NIC_IP_POOL:
988
            try:
989
              nic.ip = self.cfg.GenerateIp(net_uuid, ec_id)
990
            except errors.ReservationError:
991
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
992
                                         " from the address pool" % idx,
993
                                         errors.ECODE_STATE)
994
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
995
          else:
996
            self.cfg.ReserveIp(net_uuid, nic.ip, ec_id)
997

    
998
      # net is None, ip None or given
999
      elif self.op.conflicts_check:
1000
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1001

    
1002
    # mirror node verification
1003
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1004
      if self.op.snode == pnode.name:
1005
        raise errors.OpPrereqError("The secondary node cannot be the"
1006
                                   " primary node", errors.ECODE_INVAL)
1007
      CheckNodeOnline(self, self.op.snode)
1008
      CheckNodeNotDrained(self, self.op.snode)
1009
      CheckNodeVmCapable(self, self.op.snode)
1010
      self.secondaries.append(self.op.snode)
1011

    
1012
      snode = self.cfg.GetNodeInfo(self.op.snode)
1013
      if pnode.group != snode.group:
1014
        self.LogWarning("The primary and secondary nodes are in two"
1015
                        " different node groups; the disk parameters"
1016
                        " from the first disk's node group will be"
1017
                        " used")
1018

    
1019
    nodes = [pnode]
1020
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1021
      nodes.append(snode)
1022
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1023
    excl_stor = compat.any(map(has_es, nodes))
1024
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1025
      raise errors.OpPrereqError("Disk template %s not supported with"
1026
                                 " exclusive storage" % self.op.disk_template,
1027
                                 errors.ECODE_STATE)
1028
    for disk in self.disks:
1029
      CheckSpindlesExclusiveStorage(disk, excl_stor)
1030

    
1031
    nodenames = [pnode.name] + self.secondaries
1032

    
1033
    if not self.adopt_disks:
1034
      if self.op.disk_template == constants.DT_RBD:
1035
        # _CheckRADOSFreeSpace() is just a placeholder.
1036
        # Any function that checks prerequisites can be placed here.
1037
        # Check if there is enough space on the RADOS cluster.
1038
        CheckRADOSFreeSpace()
1039
      elif self.op.disk_template == constants.DT_EXT:
1040
        # FIXME: Function that checks prereqs if needed
1041
        pass
1042
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
1043
        # Check lv size requirements, if not adopting
1044
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1045
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1046
      else:
1047
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1048
        pass
1049

    
1050
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1051
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1052
                                disk[constants.IDISK_ADOPT])
1053
                     for disk in self.disks])
1054
      if len(all_lvs) != len(self.disks):
1055
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1056
                                   errors.ECODE_INVAL)
1057
      for lv_name in all_lvs:
1058
        try:
1059
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1060
          # to ReserveLV uses the same syntax
1061
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1062
        except errors.ReservationError:
1063
          raise errors.OpPrereqError("LV named %s used by another instance" %
1064
                                     lv_name, errors.ECODE_NOTUNIQUE)
1065

    
1066
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1067
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1068

    
1069
      node_lvs = self.rpc.call_lv_list([pnode.name],
1070
                                       vg_names.payload.keys())[pnode.name]
1071
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1072
      node_lvs = node_lvs.payload
1073

    
1074
      delta = all_lvs.difference(node_lvs.keys())
1075
      if delta:
1076
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1077
                                   utils.CommaJoin(delta),
1078
                                   errors.ECODE_INVAL)
1079
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1080
      if online_lvs:
1081
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1082
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1083
                                   errors.ECODE_STATE)
1084
      # update the size of disk based on what is found
1085
      for dsk in self.disks:
1086
        dsk[constants.IDISK_SIZE] = \
1087
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1088
                                        dsk[constants.IDISK_ADOPT])][0]))
1089

    
1090
    elif self.op.disk_template == constants.DT_BLOCK:
1091
      # Normalize and de-duplicate device paths
1092
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1093
                       for disk in self.disks])
1094
      if len(all_disks) != len(self.disks):
1095
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1096
                                   errors.ECODE_INVAL)
1097
      baddisks = [d for d in all_disks
1098
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1099
      if baddisks:
1100
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1101
                                   " cannot be adopted" %
1102
                                   (utils.CommaJoin(baddisks),
1103
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1104
                                   errors.ECODE_INVAL)
1105

    
1106
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
1107
                                            list(all_disks))[pnode.name]
1108
      node_disks.Raise("Cannot get block device information from node %s" %
1109
                       pnode.name)
1110
      node_disks = node_disks.payload
1111
      delta = all_disks.difference(node_disks.keys())
1112
      if delta:
1113
        raise errors.OpPrereqError("Missing block device(s): %s" %
1114
                                   utils.CommaJoin(delta),
1115
                                   errors.ECODE_INVAL)
1116
      for dsk in self.disks:
1117
        dsk[constants.IDISK_SIZE] = \
1118
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1119

    
1120
    # Verify instance specs
1121
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1122
    ispec = {
1123
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1124
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1125
      constants.ISPEC_DISK_COUNT: len(self.disks),
1126
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1127
                                  for disk in self.disks],
1128
      constants.ISPEC_NIC_COUNT: len(self.nics),
1129
      constants.ISPEC_SPINDLE_USE: spindle_use,
1130
      }
1131

    
1132
    group_info = self.cfg.GetNodeGroup(pnode.group)
1133
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1134
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1135
                                               self.op.disk_template)
1136
    if not self.op.ignore_ipolicy and res:
1137
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1138
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1139
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1140

    
1141
    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1142

    
1143
    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1144
    # check OS parameters (remotely)
1145
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1146

    
1147
    CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1148

    
1149
    #TODO: _CheckExtParams (remotely)
1150
    # Check parameters for extstorage
1151

    
1152
    # memory check on primary node
1153
    #TODO(dynmem): use MINMEM for checking
1154
    if self.op.start:
1155
      CheckNodeFreeMemory(self, self.pnode.name,
1156
                          "creating instance %s" % self.op.instance_name,
1157
                          self.be_full[constants.BE_MAXMEM],
1158
                          self.op.hypervisor)
1159

    
1160
    self.dry_run_result = list(nodenames)
1161

    
1162
  def Exec(self, feedback_fn):
1163
    """Create and add the instance to the cluster.
1164

1165
    """
1166
    instance = self.op.instance_name
1167
    pnode_name = self.pnode.name
1168

    
1169
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1170
                self.owned_locks(locking.LEVEL_NODE)), \
1171
      "Node locks differ from node resource locks"
1172
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1173

    
1174
    ht_kind = self.op.hypervisor
1175
    if ht_kind in constants.HTS_REQ_PORT:
1176
      network_port = self.cfg.AllocatePort()
1177
    else:
1178
      network_port = None
1179

    
1180
    # This is ugly but we got a chicken-egg problem here
1181
    # We can only take the group disk parameters, as the instance
1182
    # has no disks yet (we are generating them right here).
1183
    node = self.cfg.GetNodeInfo(pnode_name)
1184
    nodegroup = self.cfg.GetNodeGroup(node.group)
1185
    disks = GenerateDiskTemplate(self,
1186
                                 self.op.disk_template,
1187
                                 instance, pnode_name,
1188
                                 self.secondaries,
1189
                                 self.disks,
1190
                                 self.instance_file_storage_dir,
1191
                                 self.op.file_driver,
1192
                                 0,
1193
                                 feedback_fn,
1194
                                 self.cfg.GetGroupDiskParams(nodegroup))
1195

    
1196
    iobj = objects.Instance(name=instance, os=self.op.os_type,
1197
                            primary_node=pnode_name,
1198
                            nics=self.nics, disks=disks,
1199
                            disk_template=self.op.disk_template,
1200
                            admin_state=constants.ADMINST_DOWN,
1201
                            network_port=network_port,
1202
                            beparams=self.op.beparams,
1203
                            hvparams=self.op.hvparams,
1204
                            hypervisor=self.op.hypervisor,
1205
                            osparams=self.op.osparams,
1206
                            )
1207

    
1208
    if self.op.tags:
1209
      for tag in self.op.tags:
1210
        iobj.AddTag(tag)
1211

    
1212
    if self.adopt_disks:
1213
      if self.op.disk_template == constants.DT_PLAIN:
1214
        # rename LVs to the newly-generated names; we need to construct
1215
        # 'fake' LV disks with the old data, plus the new unique_id
1216
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1217
        rename_to = []
1218
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1219
          rename_to.append(t_dsk.logical_id)
1220
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1221
          self.cfg.SetDiskID(t_dsk, pnode_name)
1222
        result = self.rpc.call_blockdev_rename(pnode_name,
1223
                                               zip(tmp_disks, rename_to))
1224
        result.Raise("Failed to rename adoped LVs")
1225
    else:
1226
      feedback_fn("* creating instance disks...")
1227
      try:
1228
        CreateDisks(self, iobj)
1229
      except errors.OpExecError:
1230
        self.LogWarning("Device creation failed")
1231
        self.cfg.ReleaseDRBDMinors(instance)
1232
        raise
1233

    
1234
    feedback_fn("adding instance %s to cluster config" % instance)
1235

    
1236
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1237

    
1238
    # Declare that we don't want to remove the instance lock anymore, as we've
1239
    # added the instance to the config
1240
    del self.remove_locks[locking.LEVEL_INSTANCE]
1241

    
1242
    if self.op.mode == constants.INSTANCE_IMPORT:
1243
      # Release unused nodes
1244
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1245
    else:
1246
      # Release all nodes
1247
      ReleaseLocks(self, locking.LEVEL_NODE)
1248

    
1249
    disk_abort = False
1250
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1251
      feedback_fn("* wiping instance disks...")
1252
      try:
1253
        WipeDisks(self, iobj)
1254
      except errors.OpExecError, err:
1255
        logging.exception("Wiping disks failed")
1256
        self.LogWarning("Wiping instance disks failed (%s)", err)
1257
        disk_abort = True
1258

    
1259
    if disk_abort:
1260
      # Something is already wrong with the disks, don't do anything else
1261
      pass
1262
    elif self.op.wait_for_sync:
1263
      disk_abort = not WaitForSync(self, iobj)
1264
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1265
      # make sure the disks are not degraded (still sync-ing is ok)
1266
      feedback_fn("* checking mirrors status")
1267
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1268
    else:
1269
      disk_abort = False
1270

    
1271
    if disk_abort:
1272
      RemoveDisks(self, iobj)
1273
      self.cfg.RemoveInstance(iobj.name)
1274
      # Make sure the instance lock gets removed
1275
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1276
      raise errors.OpExecError("There are some degraded disks for"
1277
                               " this instance")
1278

    
1279
    # Release all node resource locks
1280
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1281

    
1282
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1283
      # we need to set the disks ID to the primary node, since the
1284
      # preceding code might or might have not done it, depending on
1285
      # disk template and other options
1286
      for disk in iobj.disks:
1287
        self.cfg.SetDiskID(disk, pnode_name)
1288
      if self.op.mode == constants.INSTANCE_CREATE:
1289
        if not self.op.no_install:
1290
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1291
                        not self.op.wait_for_sync)
1292
          if pause_sync:
1293
            feedback_fn("* pausing disk sync to install instance OS")
1294
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1295
                                                              (iobj.disks,
1296
                                                               iobj), True)
1297
            for idx, success in enumerate(result.payload):
1298
              if not success:
1299
                logging.warn("pause-sync of instance %s for disk %d failed",
1300
                             instance, idx)
1301

    
1302
          feedback_fn("* running the instance OS create scripts...")
1303
          # FIXME: pass debug option from opcode to backend
1304
          os_add_result = \
1305
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1306
                                          self.op.debug_level)
1307
          if pause_sync:
1308
            feedback_fn("* resuming disk sync")
1309
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1310
                                                              (iobj.disks,
1311
                                                               iobj), False)
1312
            for idx, success in enumerate(result.payload):
1313
              if not success:
1314
                logging.warn("resume-sync of instance %s for disk %d failed",
1315
                             instance, idx)
1316

    
1317
          os_add_result.Raise("Could not add os for instance %s"
1318
                              " on node %s" % (instance, pnode_name))
1319

    
1320
      else:
1321
        if self.op.mode == constants.INSTANCE_IMPORT:
1322
          feedback_fn("* running the instance OS import scripts...")
1323

    
1324
          transfers = []
1325

    
1326
          for idx, image in enumerate(self.src_images):
1327
            if not image:
1328
              continue
1329

    
1330
            # FIXME: pass debug option from opcode to backend
1331
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1332
                                               constants.IEIO_FILE, (image, ),
1333
                                               constants.IEIO_SCRIPT,
1334
                                               (iobj.disks[idx], idx),
1335
                                               None)
1336
            transfers.append(dt)
1337

    
1338
          import_result = \
1339
            masterd.instance.TransferInstanceData(self, feedback_fn,
1340
                                                  self.op.src_node, pnode_name,
1341
                                                  self.pnode.secondary_ip,
1342
                                                  iobj, transfers)
1343
          if not compat.all(import_result):
1344
            self.LogWarning("Some disks for instance %s on node %s were not"
1345
                            " imported successfully" % (instance, pnode_name))
1346

    
1347
          rename_from = self._old_instance_name
1348

    
1349
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1350
          feedback_fn("* preparing remote import...")
1351
          # The source cluster will stop the instance before attempting to make
1352
          # a connection. In some cases stopping an instance can take a long
1353
          # time, hence the shutdown timeout is added to the connection
1354
          # timeout.
1355
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1356
                             self.op.source_shutdown_timeout)
1357
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1358

    
1359
          assert iobj.primary_node == self.pnode.name
1360
          disk_results = \
1361
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1362
                                          self.source_x509_ca,
1363
                                          self._cds, timeouts)
1364
          if not compat.all(disk_results):
1365
            # TODO: Should the instance still be started, even if some disks
1366
            # failed to import (valid for local imports, too)?
1367
            self.LogWarning("Some disks for instance %s on node %s were not"
1368
                            " imported successfully" % (instance, pnode_name))
1369

    
1370
          rename_from = self.source_instance_name
1371

    
1372
        else:
1373
          # also checked in the prereq part
1374
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1375
                                       % self.op.mode)
1376

    
1377
        # Run rename script on newly imported instance
1378
        assert iobj.name == instance
1379
        feedback_fn("Running rename script for %s" % instance)
1380
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1381
                                                   rename_from,
1382
                                                   self.op.debug_level)
1383
        if result.fail_msg:
1384
          self.LogWarning("Failed to run rename script for %s on node"
1385
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
1386

    
1387
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1388

    
1389
    if self.op.start:
1390
      iobj.admin_state = constants.ADMINST_UP
1391
      self.cfg.Update(iobj, feedback_fn)
1392
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1393
      feedback_fn("* starting instance...")
1394
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1395
                                            False, self.op.reason)
1396
      result.Raise("Could not start instance")
1397

    
1398
    return list(iobj.all_nodes)
1399

    
1400

    
1401
class LUInstanceRename(LogicalUnit):
1402
  """Rename an instance.
1403

1404
  """
1405
  HPATH = "instance-rename"
1406
  HTYPE = constants.HTYPE_INSTANCE
1407

    
1408
  def CheckArguments(self):
1409
    """Check arguments.
1410

1411
    """
1412
    if self.op.ip_check and not self.op.name_check:
1413
      # TODO: make the ip check more flexible and not depend on the name check
1414
      raise errors.OpPrereqError("IP address check requires a name check",
1415
                                 errors.ECODE_INVAL)
1416

    
1417
  def BuildHooksEnv(self):
1418
    """Build hooks env.
1419

1420
    This runs on master, primary and secondary nodes of the instance.
1421

1422
    """
1423
    env = BuildInstanceHookEnvByObject(self, self.instance)
1424
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1425
    return env
1426

    
1427
  def BuildHooksNodes(self):
1428
    """Build hooks nodes.
1429

1430
    """
1431
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1432
    return (nl, nl)
1433

    
1434
  def CheckPrereq(self):
1435
    """Check prerequisites.
1436

1437
    This checks that the instance is in the cluster and is not running.
1438

1439
    """
1440
    self.op.instance_name = ExpandInstanceName(self.cfg,
1441
                                               self.op.instance_name)
1442
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1443
    assert instance is not None
1444
    CheckNodeOnline(self, instance.primary_node)
1445
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1446
                       msg="cannot rename")
1447
    self.instance = instance
1448

    
1449
    new_name = self.op.new_name
1450
    if self.op.name_check:
1451
      hostname = _CheckHostnameSane(self, new_name)
1452
      new_name = self.op.new_name = hostname.name
1453
      if (self.op.ip_check and
1454
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1455
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1456
                                   (hostname.ip, new_name),
1457
                                   errors.ECODE_NOTUNIQUE)
1458

    
1459
    instance_list = self.cfg.GetInstanceList()
1460
    if new_name in instance_list and new_name != instance.name:
1461
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1462
                                 new_name, errors.ECODE_EXISTS)
1463

    
1464
  def Exec(self, feedback_fn):
1465
    """Rename the instance.
1466

1467
    """
1468
    inst = self.instance
1469
    old_name = inst.name
1470

    
1471
    rename_file_storage = False
1472
    if (inst.disk_template in constants.DTS_FILEBASED and
1473
        self.op.new_name != inst.name):
1474
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1475
      rename_file_storage = True
1476

    
1477
    self.cfg.RenameInstance(inst.name, self.op.new_name)
1478
    # Change the instance lock. This is definitely safe while we hold the BGL.
1479
    # Otherwise the new lock would have to be added in acquired mode.
1480
    assert self.REQ_BGL
1481
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1482
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1483
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1484

    
1485
    # re-read the instance from the configuration after rename
1486
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
1487

    
1488
    if rename_file_storage:
1489
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1490
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1491
                                                     old_file_storage_dir,
1492
                                                     new_file_storage_dir)
1493
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1494
                   " (but the instance has been renamed in Ganeti)" %
1495
                   (inst.primary_node, old_file_storage_dir,
1496
                    new_file_storage_dir))
1497

    
1498
    StartInstanceDisks(self, inst, None)
1499
    # update info on disks
1500
    info = GetInstanceInfoText(inst)
1501
    for (idx, disk) in enumerate(inst.disks):
1502
      for node in inst.all_nodes:
1503
        self.cfg.SetDiskID(disk, node)
1504
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
1505
        if result.fail_msg:
1506
          self.LogWarning("Error setting info on node %s for disk %s: %s",
1507
                          node, idx, result.fail_msg)
1508
    try:
1509
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1510
                                                 old_name, self.op.debug_level)
1511
      msg = result.fail_msg
1512
      if msg:
1513
        msg = ("Could not run OS rename script for instance %s on node %s"
1514
               " (but the instance has been renamed in Ganeti): %s" %
1515
               (inst.name, inst.primary_node, msg))
1516
        self.LogWarning(msg)
1517
    finally:
1518
      ShutdownInstanceDisks(self, inst)
1519

    
1520
    return inst.name
1521

    
1522

    
1523
class LUInstanceRemove(LogicalUnit):
1524
  """Remove an instance.
1525

1526
  """
1527
  HPATH = "instance-remove"
1528
  HTYPE = constants.HTYPE_INSTANCE
1529
  REQ_BGL = False
1530

    
1531
  def ExpandNames(self):
1532
    self._ExpandAndLockInstance()
1533
    self.needed_locks[locking.LEVEL_NODE] = []
1534
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1535
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1536

    
1537
  def DeclareLocks(self, level):
1538
    if level == locking.LEVEL_NODE:
1539
      self._LockInstancesNodes()
1540
    elif level == locking.LEVEL_NODE_RES:
1541
      # Copy node locks
1542
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1543
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1544

    
1545
  def BuildHooksEnv(self):
1546
    """Build hooks env.
1547

1548
    This runs on master, primary and secondary nodes of the instance.
1549

1550
    """
1551
    env = BuildInstanceHookEnvByObject(self, self.instance)
1552
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1553
    return env
1554

    
1555
  def BuildHooksNodes(self):
1556
    """Build hooks nodes.
1557

1558
    """
1559
    nl = [self.cfg.GetMasterNode()]
1560
    nl_post = list(self.instance.all_nodes) + nl
1561
    return (nl, nl_post)
1562

    
1563
  def CheckPrereq(self):
1564
    """Check prerequisites.
1565

1566
    This checks that the instance is in the cluster.
1567

1568
    """
1569
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1570
    assert self.instance is not None, \
1571
      "Cannot retrieve locked instance %s" % self.op.instance_name
1572

    
1573
  def Exec(self, feedback_fn):
1574
    """Remove the instance.
1575

1576
    """
1577
    instance = self.instance
1578
    logging.info("Shutting down instance %s on node %s",
1579
                 instance.name, instance.primary_node)
1580

    
1581
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1582
                                             self.op.shutdown_timeout,
1583
                                             self.op.reason)
1584
    msg = result.fail_msg
1585
    if msg:
1586
      if self.op.ignore_failures:
1587
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
1588
      else:
1589
        raise errors.OpExecError("Could not shutdown instance %s on"
1590
                                 " node %s: %s" %
1591
                                 (instance.name, instance.primary_node, msg))
1592

    
1593
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1594
            self.owned_locks(locking.LEVEL_NODE_RES))
1595
    assert not (set(instance.all_nodes) -
1596
                self.owned_locks(locking.LEVEL_NODE)), \
1597
      "Not owning correct locks"
1598

    
1599
    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1600

    
1601

    
1602
class LUInstanceMove(LogicalUnit):
1603
  """Move an instance by data-copying.
1604

1605
  """
1606
  HPATH = "instance-move"
1607
  HTYPE = constants.HTYPE_INSTANCE
1608
  REQ_BGL = False
1609

    
1610
  def ExpandNames(self):
1611
    self._ExpandAndLockInstance()
1612
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
1613
    self.op.target_node = target_node
1614
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
1615
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1616
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1617

    
1618
  def DeclareLocks(self, level):
1619
    if level == locking.LEVEL_NODE:
1620
      self._LockInstancesNodes(primary_only=True)
1621
    elif level == locking.LEVEL_NODE_RES:
1622
      # Copy node locks
1623
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1624
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1625

    
1626
  def BuildHooksEnv(self):
1627
    """Build hooks env.
1628

1629
    This runs on master, primary and secondary nodes of the instance.
1630

1631
    """
1632
    env = {
1633
      "TARGET_NODE": self.op.target_node,
1634
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1635
      }
1636
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1637
    return env
1638

    
1639
  def BuildHooksNodes(self):
1640
    """Build hooks nodes.
1641

1642
    """
1643
    nl = [
1644
      self.cfg.GetMasterNode(),
1645
      self.instance.primary_node,
1646
      self.op.target_node,
1647
      ]
1648
    return (nl, nl)
1649

    
1650
  def CheckPrereq(self):
1651
    """Check prerequisites.
1652

1653
    This checks that the instance is in the cluster.
1654

1655
    """
1656
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1657
    assert self.instance is not None, \
1658
      "Cannot retrieve locked instance %s" % self.op.instance_name
1659

    
1660
    if instance.disk_template not in constants.DTS_COPYABLE:
1661
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1662
                                 instance.disk_template, errors.ECODE_STATE)
1663

    
1664
    node = self.cfg.GetNodeInfo(self.op.target_node)
1665
    assert node is not None, \
1666
      "Cannot retrieve locked node %s" % self.op.target_node
1667

    
1668
    self.target_node = target_node = node.name
1669

    
1670
    if target_node == instance.primary_node:
1671
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1672
                                 (instance.name, target_node),
1673
                                 errors.ECODE_STATE)
1674

    
1675
    bep = self.cfg.GetClusterInfo().FillBE(instance)
1676

    
1677
    for idx, dsk in enumerate(instance.disks):
1678
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1679
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1680
                                   " cannot copy" % idx, errors.ECODE_STATE)
1681

    
1682
    CheckNodeOnline(self, target_node)
1683
    CheckNodeNotDrained(self, target_node)
1684
    CheckNodeVmCapable(self, target_node)
1685
    cluster = self.cfg.GetClusterInfo()
1686
    group_info = self.cfg.GetNodeGroup(node.group)
1687
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1688
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1689
                           ignore=self.op.ignore_ipolicy)
1690

    
1691
    if instance.admin_state == constants.ADMINST_UP:
1692
      # check memory requirements on the secondary node
1693
      CheckNodeFreeMemory(self, target_node,
1694
                          "failing over instance %s" %
1695
                          instance.name, bep[constants.BE_MAXMEM],
1696
                          instance.hypervisor)
1697
    else:
1698
      self.LogInfo("Not checking memory on the secondary node as"
1699
                   " instance will not be started")
1700

    
1701
    # check bridge existance
1702
    CheckInstanceBridgesExist(self, instance, node=target_node)
1703

    
1704
  def Exec(self, feedback_fn):
1705
    """Move an instance.
1706

1707
    The move is done by shutting it down on its present node, copying
1708
    the data over (slow) and starting it on the new node.
1709

1710
    """
1711
    instance = self.instance
1712

    
1713
    source_node = instance.primary_node
1714
    target_node = self.target_node
1715

    
1716
    self.LogInfo("Shutting down instance %s on source node %s",
1717
                 instance.name, source_node)
1718

    
1719
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1720
            self.owned_locks(locking.LEVEL_NODE_RES))
1721

    
1722
    result = self.rpc.call_instance_shutdown(source_node, instance,
1723
                                             self.op.shutdown_timeout,
1724
                                             self.op.reason)
1725
    msg = result.fail_msg
1726
    if msg:
1727
      if self.op.ignore_consistency:
1728
        self.LogWarning("Could not shutdown instance %s on node %s."
1729
                        " Proceeding anyway. Please make sure node"
1730
                        " %s is down. Error details: %s",
1731
                        instance.name, source_node, source_node, msg)
1732
      else:
1733
        raise errors.OpExecError("Could not shutdown instance %s on"
1734
                                 " node %s: %s" %
1735
                                 (instance.name, source_node, msg))
1736

    
1737
    # create the target disks
1738
    try:
1739
      CreateDisks(self, instance, target_node=target_node)
1740
    except errors.OpExecError:
1741
      self.LogWarning("Device creation failed")
1742
      self.cfg.ReleaseDRBDMinors(instance.name)
1743
      raise
1744

    
1745
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1746

    
1747
    errs = []
1748
    # activate, get path, copy the data over
1749
    for idx, disk in enumerate(instance.disks):
1750
      self.LogInfo("Copying data for disk %d", idx)
1751
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1752
                                               instance.name, True, idx)
1753
      if result.fail_msg:
1754
        self.LogWarning("Can't assemble newly created disk %d: %s",
1755
                        idx, result.fail_msg)
1756
        errs.append(result.fail_msg)
1757
        break
1758
      dev_path = result.payload
1759
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1760
                                             target_node, dev_path,
1761
                                             cluster_name)
1762
      if result.fail_msg:
1763
        self.LogWarning("Can't copy data over for disk %d: %s",
1764
                        idx, result.fail_msg)
1765
        errs.append(result.fail_msg)
1766
        break
1767

    
1768
    if errs:
1769
      self.LogWarning("Some disks failed to copy, aborting")
1770
      try:
1771
        RemoveDisks(self, instance, target_node=target_node)
1772
      finally:
1773
        self.cfg.ReleaseDRBDMinors(instance.name)
1774
        raise errors.OpExecError("Errors during disk copy: %s" %
1775
                                 (",".join(errs),))
1776

    
1777
    instance.primary_node = target_node
1778
    self.cfg.Update(instance, feedback_fn)
1779

    
1780
    self.LogInfo("Removing the disks on the original node")
1781
    RemoveDisks(self, instance, target_node=source_node)
1782

    
1783
    # Only start the instance if it's marked as up
1784
    if instance.admin_state == constants.ADMINST_UP:
1785
      self.LogInfo("Starting instance %s on node %s",
1786
                   instance.name, target_node)
1787

    
1788
      disks_ok, _ = AssembleInstanceDisks(self, instance,
1789
                                          ignore_secondaries=True)
1790
      if not disks_ok:
1791
        ShutdownInstanceDisks(self, instance)
1792
        raise errors.OpExecError("Can't activate the instance's disks")
1793

    
1794
      result = self.rpc.call_instance_start(target_node,
1795
                                            (instance, None, None), False,
1796
                                            self.op.reason)
1797
      msg = result.fail_msg
1798
      if msg:
1799
        ShutdownInstanceDisks(self, instance)
1800
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1801
                                 (instance.name, target_node, msg))
1802

    
1803

    
1804
class LUInstanceMultiAlloc(NoHooksLU):
1805
  """Allocates multiple instances at the same time.
1806

1807
  """
1808
  REQ_BGL = False
1809

    
1810
  def CheckArguments(self):
1811
    """Check arguments.
1812

1813
    """
1814
    nodes = []
1815
    for inst in self.op.instances:
1816
      if inst.iallocator is not None:
1817
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1818
                                   " instance objects", errors.ECODE_INVAL)
1819
      nodes.append(bool(inst.pnode))
1820
      if inst.disk_template in constants.DTS_INT_MIRROR:
1821
        nodes.append(bool(inst.snode))
1822

    
1823
    has_nodes = compat.any(nodes)
1824
    if compat.all(nodes) ^ has_nodes:
1825
      raise errors.OpPrereqError("There are instance objects providing"
1826
                                 " pnode/snode while others do not",
1827
                                 errors.ECODE_INVAL)
1828

    
1829
    if self.op.iallocator is None:
1830
      default_iallocator = self.cfg.GetDefaultIAllocator()
1831
      if default_iallocator and has_nodes:
1832
        self.op.iallocator = default_iallocator
1833
      else:
1834
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1835
                                   " given and no cluster-wide default"
1836
                                   " iallocator found; please specify either"
1837
                                   " an iallocator or nodes on the instances"
1838
                                   " or set a cluster-wide default iallocator",
1839
                                   errors.ECODE_INVAL)
1840

    
1841
    _CheckOpportunisticLocking(self.op)
1842

    
1843
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1844
    if dups:
1845
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1846
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1847

    
1848
  def ExpandNames(self):
1849
    """Calculate the locks.
1850

1851
    """
1852
    self.share_locks = ShareAll()
1853
    self.needed_locks = {
1854
      # iallocator will select nodes and even if no iallocator is used,
1855
      # collisions with LUInstanceCreate should be avoided
1856
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1857
      }
1858

    
1859
    if self.op.iallocator:
1860
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1861
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1862

    
1863
      if self.op.opportunistic_locking:
1864
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1865
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1866
    else:
1867
      nodeslist = []
1868
      for inst in self.op.instances:
1869
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1870
        nodeslist.append(inst.pnode)
1871
        if inst.snode is not None:
1872
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
1873
          nodeslist.append(inst.snode)
1874

    
1875
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1876
      # Lock resources of instance's primary and secondary nodes (copy to
1877
      # prevent accidential modification)
1878
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1879

    
1880
  def CheckPrereq(self):
1881
    """Check prerequisite.
1882

1883
    """
1884
    cluster = self.cfg.GetClusterInfo()
1885
    default_vg = self.cfg.GetVGName()
1886
    ec_id = self.proc.GetECId()
1887

    
1888
    if self.op.opportunistic_locking:
1889
      # Only consider nodes for which a lock is held
1890
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1891
    else:
1892
      node_whitelist = None
1893

    
1894
    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1895
                                         _ComputeNics(op, cluster, None,
1896
                                                      self.cfg, ec_id),
1897
                                         _ComputeFullBeParams(op, cluster),
1898
                                         node_whitelist)
1899
             for op in self.op.instances]
1900

    
1901
    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1902
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1903

    
1904
    ial.Run(self.op.iallocator)
1905

    
1906
    if not ial.success:
1907
      raise errors.OpPrereqError("Can't compute nodes using"
1908
                                 " iallocator '%s': %s" %
1909
                                 (self.op.iallocator, ial.info),
1910
                                 errors.ECODE_NORES)
1911

    
1912
    self.ia_result = ial.result
1913

    
1914
    if self.op.dry_run:
1915
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1916
        constants.JOB_IDS_KEY: [],
1917
        })
1918

    
1919
  def _ConstructPartialResult(self):
1920
    """Contructs the partial result.
1921

1922
    """
1923
    (allocatable, failed) = self.ia_result
1924
    return {
1925
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1926
        map(compat.fst, allocatable),
1927
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1928
      }
1929

    
1930
  def Exec(self, feedback_fn):
1931
    """Executes the opcode.
1932

1933
    """
1934
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
1935
    (allocatable, failed) = self.ia_result
1936

    
1937
    jobs = []
1938
    for (name, nodes) in allocatable:
1939
      op = op2inst.pop(name)
1940

    
1941
      if len(nodes) > 1:
1942
        (op.pnode, op.snode) = nodes
1943
      else:
1944
        (op.pnode,) = nodes
1945

    
1946
      jobs.append([op])
1947

    
1948
    missing = set(op2inst.keys()) - set(failed)
1949
    assert not missing, \
1950
      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
1951

    
1952
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
1953

    
1954

    
1955
class _InstNicModPrivate:
1956
  """Data structure for network interface modifications.
1957

1958
  Used by L{LUInstanceSetParams}.
1959

1960
  """
1961
  def __init__(self):
1962
    self.params = None
1963
    self.filled = None
1964

    
1965

    
1966
def _PrepareContainerMods(mods, private_fn):
1967
  """Prepares a list of container modifications by adding a private data field.
1968

1969
  @type mods: list of tuples; (operation, index, parameters)
1970
  @param mods: List of modifications
1971
  @type private_fn: callable or None
1972
  @param private_fn: Callable for constructing a private data field for a
1973
    modification
1974
  @rtype: list
1975

1976
  """
1977
  if private_fn is None:
1978
    fn = lambda: None
1979
  else:
1980
    fn = private_fn
1981

    
1982
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
1983

    
1984

    
1985
def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
1986
  """Checks if nodes have enough physical CPUs
1987

1988
  This function checks if all given nodes have the needed number of
1989
  physical CPUs. In case any node has less CPUs or we cannot get the
1990
  information from the node, this function raises an OpPrereqError
1991
  exception.
1992

1993
  @type lu: C{LogicalUnit}
1994
  @param lu: a logical unit from which we get configuration data
1995
  @type nodenames: C{list}
1996
  @param nodenames: the list of node names to check
1997
  @type requested: C{int}
1998
  @param requested: the minimum acceptable number of physical CPUs
1999
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2000
      or we cannot check the node
2001

2002
  """
2003
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2004
  for node in nodenames:
2005
    info = nodeinfo[node]
2006
    info.Raise("Cannot get current information from node %s" % node,
2007
               prereq=True, ecode=errors.ECODE_ENVIRON)
2008
    (_, _, (hv_info, )) = info.payload
2009
    num_cpus = hv_info.get("cpu_total", None)
2010
    if not isinstance(num_cpus, int):
2011
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2012
                                 " on node %s, result was '%s'" %
2013
                                 (node, num_cpus), errors.ECODE_ENVIRON)
2014
    if requested > num_cpus:
2015
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2016
                                 "required" % (node, num_cpus, requested),
2017
                                 errors.ECODE_NORES)
2018

    
2019

    
2020
def GetItemFromContainer(identifier, kind, container):
2021
  """Return the item refered by the identifier.
2022

2023
  @type identifier: string
2024
  @param identifier: Item index or name or UUID
2025
  @type kind: string
2026
  @param kind: One-word item description
2027
  @type container: list
2028
  @param container: Container to get the item from
2029

2030
  """
2031
  # Index
2032
  try:
2033
    idx = int(identifier)
2034
    if idx == -1:
2035
      # Append
2036
      absidx = len(container) - 1
2037
    elif idx < 0:
2038
      raise IndexError("Not accepting negative indices other than -1")
2039
    elif idx > len(container):
2040
      raise IndexError("Got %s index %s, but there are only %s" %
2041
                       (kind, idx, len(container)))
2042
    else:
2043
      absidx = idx
2044
    return (absidx, container[idx])
2045
  except ValueError:
2046
    pass
2047

    
2048
  for idx, item in enumerate(container):
2049
    if item.uuid == identifier or item.name == identifier:
2050
      return (idx, item)
2051

    
2052
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2053
                             (kind, identifier), errors.ECODE_NOENT)
2054

    
2055

    
2056
def _ApplyContainerMods(kind, container, chgdesc, mods,
2057
                        create_fn, modify_fn, remove_fn):
2058
  """Applies descriptions in C{mods} to C{container}.
2059

2060
  @type kind: string
2061
  @param kind: One-word item description
2062
  @type container: list
2063
  @param container: Container to modify
2064
  @type chgdesc: None or list
2065
  @param chgdesc: List of applied changes
2066
  @type mods: list
2067
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2068
  @type create_fn: callable
2069
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2070
    receives absolute item index, parameters and private data object as added
2071
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2072
    as list
2073
  @type modify_fn: callable
2074
  @param modify_fn: Callback for modifying an existing item
2075
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2076
    and private data object as added by L{_PrepareContainerMods}, returns
2077
    changes as list
2078
  @type remove_fn: callable
2079
  @param remove_fn: Callback on removing item; receives absolute item index,
2080
    item and private data object as added by L{_PrepareContainerMods}
2081

2082
  """
2083
  for (op, identifier, params, private) in mods:
2084
    changes = None
2085

    
2086
    if op == constants.DDM_ADD:
2087
      # Calculate where item will be added
2088
      # When adding an item, identifier can only be an index
2089
      try:
2090
        idx = int(identifier)
2091
      except ValueError:
2092
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2093
                                   " identifier for %s" % constants.DDM_ADD,
2094
                                   errors.ECODE_INVAL)
2095
      if idx == -1:
2096
        addidx = len(container)
2097
      else:
2098
        if idx < 0:
2099
          raise IndexError("Not accepting negative indices other than -1")
2100
        elif idx > len(container):
2101
          raise IndexError("Got %s index %s, but there are only %s" %
2102
                           (kind, idx, len(container)))
2103
        addidx = idx
2104

    
2105
      if create_fn is None:
2106
        item = params
2107
      else:
2108
        (item, changes) = create_fn(addidx, params, private)
2109

    
2110
      if idx == -1:
2111
        container.append(item)
2112
      else:
2113
        assert idx >= 0
2114
        assert idx <= len(container)
2115
        # list.insert does so before the specified index
2116
        container.insert(idx, item)
2117
    else:
2118
      # Retrieve existing item
2119
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2120

    
2121
      if op == constants.DDM_REMOVE:
2122
        assert not params
2123

    
2124
        if remove_fn is not None:
2125
          remove_fn(absidx, item, private)
2126

    
2127
        changes = [("%s/%s" % (kind, absidx), "remove")]
2128

    
2129
        assert container[absidx] == item
2130
        del container[absidx]
2131
      elif op == constants.DDM_MODIFY:
2132
        if modify_fn is not None:
2133
          changes = modify_fn(absidx, item, params, private)
2134
      else:
2135
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2136

    
2137
    assert _TApplyContModsCbChanges(changes)
2138

    
2139
    if not (chgdesc is None or changes is None):
2140
      chgdesc.extend(changes)
2141

    
2142

    
2143
def _UpdateIvNames(base_index, disks):
2144
  """Updates the C{iv_name} attribute of disks.
2145

2146
  @type disks: list of L{objects.Disk}
2147

2148
  """
2149
  for (idx, disk) in enumerate(disks):
2150
    disk.iv_name = "disk/%s" % (base_index + idx, )
2151

    
2152

    
2153
class LUInstanceSetParams(LogicalUnit):
2154
  """Modifies an instances's parameters.
2155

2156
  """
2157
  HPATH = "instance-modify"
2158
  HTYPE = constants.HTYPE_INSTANCE
2159
  REQ_BGL = False
2160

    
2161
  @staticmethod
2162
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2163
    assert ht.TList(mods)
2164
    assert not mods or len(mods[0]) in (2, 3)
2165

    
2166
    if mods and len(mods[0]) == 2:
2167
      result = []
2168

    
2169
      addremove = 0
2170
      for op, params in mods:
2171
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2172
          result.append((op, -1, params))
2173
          addremove += 1
2174

    
2175
          if addremove > 1:
2176
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2177
                                       " supported at a time" % kind,
2178
                                       errors.ECODE_INVAL)
2179
        else:
2180
          result.append((constants.DDM_MODIFY, op, params))
2181

    
2182
      assert verify_fn(result)
2183
    else:
2184
      result = mods
2185

    
2186
    return result
2187

    
2188
  @staticmethod
2189
  def _CheckMods(kind, mods, key_types, item_fn):
2190
    """Ensures requested disk/NIC modifications are valid.
2191

2192
    """
2193
    for (op, _, params) in mods:
2194
      assert ht.TDict(params)
2195

    
2196
      # If 'key_types' is an empty dict, we assume we have an
2197
      # 'ext' template and thus do not ForceDictType
2198
      if key_types:
2199
        utils.ForceDictType(params, key_types)
2200

    
2201
      if op == constants.DDM_REMOVE:
2202
        if params:
2203
          raise errors.OpPrereqError("No settings should be passed when"
2204
                                     " removing a %s" % kind,
2205
                                     errors.ECODE_INVAL)
2206
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2207
        item_fn(op, params)
2208
      else:
2209
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2210

    
2211
  @staticmethod
2212
  def _VerifyDiskModification(op, params, excl_stor):
2213
    """Verifies a disk modification.
2214

2215
    """
2216
    if op == constants.DDM_ADD:
2217
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2218
      if mode not in constants.DISK_ACCESS_SET:
2219
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2220
                                   errors.ECODE_INVAL)
2221

    
2222
      size = params.get(constants.IDISK_SIZE, None)
2223
      if size is None:
2224
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2225
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2226

    
2227
      try:
2228
        size = int(size)
2229
      except (TypeError, ValueError), err:
2230
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2231
                                   errors.ECODE_INVAL)
2232

    
2233
      params[constants.IDISK_SIZE] = size
2234
      name = params.get(constants.IDISK_NAME, None)
2235
      if name is not None and name.lower() == constants.VALUE_NONE:
2236
        params[constants.IDISK_NAME] = None
2237

    
2238
      CheckSpindlesExclusiveStorage(params, excl_stor)
2239

    
2240
    elif op == constants.DDM_MODIFY:
2241
      if constants.IDISK_SIZE in params:
2242
        raise errors.OpPrereqError("Disk size change not possible, use"
2243
                                   " grow-disk", errors.ECODE_INVAL)
2244
      if len(params) > 2:
2245
        raise errors.OpPrereqError("Disk modification doesn't support"
2246
                                   " additional arbitrary parameters",
2247
                                   errors.ECODE_INVAL)
2248
      name = params.get(constants.IDISK_NAME, None)
2249
      if name is not None and name.lower() == constants.VALUE_NONE:
2250
        params[constants.IDISK_NAME] = None
2251

    
2252
  @staticmethod
2253
  def _VerifyNicModification(op, params):
2254
    """Verifies a network interface modification.
2255

2256
    """
2257
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2258
      ip = params.get(constants.INIC_IP, None)
2259
      name = params.get(constants.INIC_NAME, None)
2260
      req_net = params.get(constants.INIC_NETWORK, None)
2261
      link = params.get(constants.NIC_LINK, None)
2262
      mode = params.get(constants.NIC_MODE, None)
2263
      if name is not None and name.lower() == constants.VALUE_NONE:
2264
        params[constants.INIC_NAME] = None
2265
      if req_net is not None:
2266
        if req_net.lower() == constants.VALUE_NONE:
2267
          params[constants.INIC_NETWORK] = None
2268
          req_net = None
2269
        elif link is not None or mode is not None:
2270
          raise errors.OpPrereqError("If network is given"
2271
                                     " mode or link should not",
2272
                                     errors.ECODE_INVAL)
2273

    
2274
      if op == constants.DDM_ADD:
2275
        macaddr = params.get(constants.INIC_MAC, None)
2276
        if macaddr is None:
2277
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2278

    
2279
      if ip is not None:
2280
        if ip.lower() == constants.VALUE_NONE:
2281
          params[constants.INIC_IP] = None
2282
        else:
2283
          if ip.lower() == constants.NIC_IP_POOL:
2284
            if op == constants.DDM_ADD and req_net is None:
2285
              raise errors.OpPrereqError("If ip=pool, parameter network"
2286
                                         " cannot be none",
2287
                                         errors.ECODE_INVAL)
2288
          else:
2289
            if not netutils.IPAddress.IsValid(ip):
2290
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2291
                                         errors.ECODE_INVAL)
2292

    
2293
      if constants.INIC_MAC in params:
2294
        macaddr = params[constants.INIC_MAC]
2295
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2296
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2297

    
2298
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2299
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2300
                                     " modifying an existing NIC",
2301
                                     errors.ECODE_INVAL)
2302

    
2303
  def CheckArguments(self):
2304
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2305
            self.op.hvparams or self.op.beparams or self.op.os_name or
2306
            self.op.offline is not None or self.op.runtime_mem or
2307
            self.op.pnode):
2308
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2309

    
2310
    if self.op.hvparams:
2311
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2312
                           "hypervisor", "instance", "cluster")
2313

    
2314
    self.op.disks = self._UpgradeDiskNicMods(
2315
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2316
    self.op.nics = self._UpgradeDiskNicMods(
2317
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2318

    
2319
    if self.op.disks and self.op.disk_template is not None:
2320
      raise errors.OpPrereqError("Disk template conversion and other disk"
2321
                                 " changes not supported at the same time",
2322
                                 errors.ECODE_INVAL)
2323

    
2324
    if (self.op.disk_template and
2325
        self.op.disk_template in constants.DTS_INT_MIRROR and
2326
        self.op.remote_node is None):
2327
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2328
                                 " one requires specifying a secondary node",
2329
                                 errors.ECODE_INVAL)
2330

    
2331
    # Check NIC modifications
2332
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2333
                    self._VerifyNicModification)
2334

    
2335
    if self.op.pnode:
2336
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2337

    
2338
  def ExpandNames(self):
2339
    self._ExpandAndLockInstance()
2340
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2341
    # Can't even acquire node locks in shared mode as upcoming changes in
2342
    # Ganeti 2.6 will start to modify the node object on disk conversion
2343
    self.needed_locks[locking.LEVEL_NODE] = []
2344
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2345
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2346
    # Look node group to look up the ipolicy
2347
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2348

    
2349
  def DeclareLocks(self, level):
2350
    if level == locking.LEVEL_NODEGROUP:
2351
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2352
      # Acquire locks for the instance's nodegroups optimistically. Needs
2353
      # to be verified in CheckPrereq
2354
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2355
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2356
    elif level == locking.LEVEL_NODE:
2357
      self._LockInstancesNodes()
2358
      if self.op.disk_template and self.op.remote_node:
2359
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2360
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2361
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2362
      # Copy node locks
2363
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2364
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2365

    
2366
  def BuildHooksEnv(self):
2367
    """Build hooks env.
2368

2369
    This runs on the master, primary and secondaries.
2370

2371
    """
2372
    args = {}
2373
    if constants.BE_MINMEM in self.be_new:
2374
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2375
    if constants.BE_MAXMEM in self.be_new:
2376
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2377
    if constants.BE_VCPUS in self.be_new:
2378
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2379
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2380
    # information at all.
2381

    
2382
    if self._new_nics is not None:
2383
      nics = []
2384

    
2385
      for nic in self._new_nics:
2386
        n = copy.deepcopy(nic)
2387
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2388
        n.nicparams = nicparams
2389
        nics.append(NICToTuple(self, n))
2390

    
2391
      args["nics"] = nics
2392

    
2393
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2394
    if self.op.disk_template:
2395
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2396
    if self.op.runtime_mem:
2397
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2398

    
2399
    return env
2400

    
2401
  def BuildHooksNodes(self):
2402
    """Build hooks nodes.
2403

2404
    """
2405
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2406
    return (nl, nl)
2407

    
2408
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2409
                              old_params, cluster, pnode):
2410

    
2411
    update_params_dict = dict([(key, params[key])
2412
                               for key in constants.NICS_PARAMETERS
2413
                               if key in params])
2414

    
2415
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2416
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2417

    
2418
    new_net_uuid = None
2419
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2420
    if new_net_uuid_or_name:
2421
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2422
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2423

    
2424
    if old_net_uuid:
2425
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2426

    
2427
    if new_net_uuid:
2428
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2429
      if not netparams:
2430
        raise errors.OpPrereqError("No netparams found for the network"
2431
                                   " %s, probably not connected" %
2432
                                   new_net_obj.name, errors.ECODE_INVAL)
2433
      new_params = dict(netparams)
2434
    else:
2435
      new_params = GetUpdatedParams(old_params, update_params_dict)
2436

    
2437
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2438

    
2439
    new_filled_params = cluster.SimpleFillNIC(new_params)
2440
    objects.NIC.CheckParameterSyntax(new_filled_params)
2441

    
2442
    new_mode = new_filled_params[constants.NIC_MODE]
2443
    if new_mode == constants.NIC_MODE_BRIDGED:
2444
      bridge = new_filled_params[constants.NIC_LINK]
2445
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2446
      if msg:
2447
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2448
        if self.op.force:
2449
          self.warn.append(msg)
2450
        else:
2451
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2452

    
2453
    elif new_mode == constants.NIC_MODE_ROUTED:
2454
      ip = params.get(constants.INIC_IP, old_ip)
2455
      if ip is None:
2456
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2457
                                   " on a routed NIC", errors.ECODE_INVAL)
2458

    
2459
    elif new_mode == constants.NIC_MODE_OVS:
2460
      # TODO: check OVS link
2461
      self.LogInfo("OVS links are currently not checked for correctness")
2462

    
2463
    if constants.INIC_MAC in params:
2464
      mac = params[constants.INIC_MAC]
2465
      if mac is None:
2466
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2467
                                   errors.ECODE_INVAL)
2468
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2469
        # otherwise generate the MAC address
2470
        params[constants.INIC_MAC] = \
2471
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2472
      else:
2473
        # or validate/reserve the current one
2474
        try:
2475
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2476
        except errors.ReservationError:
2477
          raise errors.OpPrereqError("MAC address '%s' already in use"
2478
                                     " in cluster" % mac,
2479
                                     errors.ECODE_NOTUNIQUE)
2480
    elif new_net_uuid != old_net_uuid:
2481

    
2482
      def get_net_prefix(net_uuid):
2483
        mac_prefix = None
2484
        if net_uuid:
2485
          nobj = self.cfg.GetNetwork(net_uuid)
2486
          mac_prefix = nobj.mac_prefix
2487

    
2488
        return mac_prefix
2489

    
2490
      new_prefix = get_net_prefix(new_net_uuid)
2491
      old_prefix = get_net_prefix(old_net_uuid)
2492
      if old_prefix != new_prefix:
2493
        params[constants.INIC_MAC] = \
2494
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2495

    
2496
    # if there is a change in (ip, network) tuple
2497
    new_ip = params.get(constants.INIC_IP, old_ip)
2498
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2499
      if new_ip:
2500
        # if IP is pool then require a network and generate one IP
2501
        if new_ip.lower() == constants.NIC_IP_POOL:
2502
          if new_net_uuid:
2503
            try:
2504
              new_ip = self.cfg.GenerateIp(new_net_uuid, ec_id)
2505
            except errors.ReservationError:
2506
              raise errors.OpPrereqError("Unable to get a free IP"
2507
                                         " from the address pool",
2508
                                         errors.ECODE_STATE)
2509
            self.LogInfo("Chose IP %s from network %s",
2510
                         new_ip, new_net_obj.name)
2511
            params[constants.INIC_IP] = new_ip
2512
          else:
2513
            raise errors.OpPrereqError("ip=pool, but no network found",
2514
                                       errors.ECODE_INVAL)
2515
        # Reserve new IP if in the new network if any
2516
        elif new_net_uuid:
2517
          self.cfg.ReserveIp(new_net_uuid, new_ip, ec_id)
2518
        # new network is None so check if new IP is a conflicting IP
2519
        elif self.op.conflicts_check:
2520
          _CheckForConflictingIp(self, new_ip, pnode)
2521

    
2522
      # release old IP if old network is not None
2523
      if old_ip and old_net_uuid:
2524
        self.cfg.ReleaseIp(old_net_uuid, old_ip, False, ec_id)
2525

    
2526
    # there are no changes in (ip, network) tuple and old network is not None
2527
    elif (old_net_uuid is not None and
2528
          (req_link is not None or req_mode is not None)):
2529
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2530
                                 " a NIC that is connected to a network",
2531
                                 errors.ECODE_INVAL)
2532

    
2533
    private.params = new_params
2534
    private.filled = new_filled_params
2535

    
2536
  def _PreCheckDiskTemplate(self, pnode_info):
2537
    """CheckPrereq checks related to a new disk template."""
2538
    # Arguments are passed to avoid configuration lookups
2539
    instance = self.instance
2540
    pnode = instance.primary_node
2541
    cluster = self.cluster
2542
    if instance.disk_template == self.op.disk_template:
2543
      raise errors.OpPrereqError("Instance already has disk template %s" %
2544
                                 instance.disk_template, errors.ECODE_INVAL)
2545

    
2546
    if (instance.disk_template,
2547
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2548
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2549
                                 " %s to %s" % (instance.disk_template,
2550
                                                self.op.disk_template),
2551
                                 errors.ECODE_INVAL)
2552
    CheckInstanceState(self, instance, INSTANCE_DOWN,
2553
                       msg="cannot change disk template")
2554
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2555
      if self.op.remote_node == pnode:
2556
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2557
                                   " as the primary node of the instance" %
2558
                                   self.op.remote_node, errors.ECODE_STATE)
2559
      CheckNodeOnline(self, self.op.remote_node)
2560
      CheckNodeNotDrained(self, self.op.remote_node)
2561
      # FIXME: here we assume that the old instance type is DT_PLAIN
2562
      assert instance.disk_template == constants.DT_PLAIN
2563
      disks = [{constants.IDISK_SIZE: d.size,
2564
                constants.IDISK_VG: d.logical_id[0]}
2565
               for d in instance.disks]
2566
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2567
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2568

    
2569
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2570
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2571
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2572
                                                              snode_group)
2573
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2574
                             ignore=self.op.ignore_ipolicy)
2575
      if pnode_info.group != snode_info.group:
2576
        self.LogWarning("The primary and secondary nodes are in two"
2577
                        " different node groups; the disk parameters"
2578
                        " from the first disk's node group will be"
2579
                        " used")
2580

    
2581
    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
2582
      # Make sure none of the nodes require exclusive storage
2583
      nodes = [pnode_info]
2584
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2585
        assert snode_info
2586
        nodes.append(snode_info)
2587
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2588
      if compat.any(map(has_es, nodes)):
2589
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2590
                  " storage is enabled" % (instance.disk_template,
2591
                                           self.op.disk_template))
2592
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2593

    
2594
  def _PreCheckDisks(self, ispec):
2595
    """CheckPrereq checks related to disk changes.
2596

2597
    @type ispec: dict
2598
    @param ispec: instance specs to be updated with the new disks
2599

2600
    """
2601
    instance = self.instance
2602
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2603

    
2604
    excl_stor = compat.any(
2605
      rpc.GetExclusiveStorageForNodeNames(self.cfg, instance.all_nodes).values()
2606
      )
2607

    
2608
    # Check disk modifications. This is done here and not in CheckArguments
2609
    # (as with NICs), because we need to know the instance's disk template
2610
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2611
    if instance.disk_template == constants.DT_EXT:
2612
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
2613
    else:
2614
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2615
                      ver_fn)
2616

    
2617
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
2618

    
2619
    # Check the validity of the `provider' parameter
2620
    if instance.disk_template in constants.DT_EXT:
2621
      for mod in self.diskmod:
2622
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2623
        if mod[0] == constants.DDM_ADD:
2624
          if ext_provider is None:
2625
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
2626
                                       " '%s' missing, during disk add" %
2627
                                       (constants.DT_EXT,
2628
                                        constants.IDISK_PROVIDER),
2629
                                       errors.ECODE_NOENT)
2630
        elif mod[0] == constants.DDM_MODIFY:
2631
          if ext_provider:
2632
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2633
                                       " modification" %
2634
                                       constants.IDISK_PROVIDER,
2635
                                       errors.ECODE_INVAL)
2636
    else:
2637
      for mod in self.diskmod:
2638
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2639
        if ext_provider is not None:
2640
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
2641
                                     " instances of type '%s'" %
2642
                                     (constants.IDISK_PROVIDER,
2643
                                      constants.DT_EXT),
2644
                                     errors.ECODE_INVAL)
2645

    
2646
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2647
      raise errors.OpPrereqError("Disk operations not supported for"
2648
                                 " diskless instances", errors.ECODE_INVAL)
2649

    
2650
    def _PrepareDiskMod(_, disk, params, __):
2651
      disk.name = params.get(constants.IDISK_NAME, None)
2652

    
2653
    # Verify disk changes (operating on a copy)
2654
    disks = copy.deepcopy(instance.disks)
2655
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2656
                        _PrepareDiskMod, None)
2657
    utils.ValidateDeviceNames("disk", disks)
2658
    if len(disks) > constants.MAX_DISKS:
2659
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2660
                                 " more" % constants.MAX_DISKS,
2661
                                 errors.ECODE_STATE)
2662
    disk_sizes = [disk.size for disk in instance.disks]
2663
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
2664
                      self.diskmod if op == constants.DDM_ADD)
2665
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2666
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2667

    
2668
    if self.op.offline is not None and self.op.offline:
2669
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2670
                         msg="can't change to offline")
2671

    
2672
  def CheckPrereq(self):
2673
    """Check prerequisites.
2674

2675
    This only checks the instance list against the existing names.
2676

2677
    """
2678
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2679
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2680

    
2681
    cluster = self.cluster = self.cfg.GetClusterInfo()
2682
    assert self.instance is not None, \
2683
      "Cannot retrieve locked instance %s" % self.op.instance_name
2684

    
2685
    pnode = instance.primary_node
2686

    
2687
    self.warn = []
2688

    
2689
    if (self.op.pnode is not None and self.op.pnode != pnode and
2690
        not self.op.force):
2691
      # verify that the instance is not up
2692
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
2693
                                                  instance.hypervisor)
2694
      if instance_info.fail_msg:
2695
        self.warn.append("Can't get instance runtime information: %s" %
2696
                         instance_info.fail_msg)
2697
      elif instance_info.payload:
2698
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2699
                                   errors.ECODE_STATE)
2700

    
2701
    assert pnode in self.owned_locks(locking.LEVEL_NODE)
2702
    nodelist = list(instance.all_nodes)
2703
    pnode_info = self.cfg.GetNodeInfo(pnode)
2704

    
2705
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2706
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2707
    group_info = self.cfg.GetNodeGroup(pnode_info.group)
2708

    
2709
    # dictionary with instance information after the modification
2710
    ispec = {}
2711

    
2712
    # Prepare NIC modifications
2713
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2714

    
2715
    # OS change
2716
    if self.op.os_name and not self.op.force:
2717
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2718
                     self.op.force_variant)
2719
      instance_os = self.op.os_name
2720
    else:
2721
      instance_os = instance.os
2722

    
2723
    assert not (self.op.disk_template and self.op.disks), \
2724
      "Can't modify disk template and apply disk changes at the same time"
2725

    
2726
    if self.op.disk_template:
2727
      self._PreCheckDiskTemplate(pnode_info)
2728

    
2729
    self._PreCheckDisks(ispec)
2730

    
2731
    # hvparams processing
2732
    if self.op.hvparams:
2733
      hv_type = instance.hypervisor
2734
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2735
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2736
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2737

    
2738
      # local check
2739
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2740
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2741
      self.hv_proposed = self.hv_new = hv_new # the new actual values
2742
      self.hv_inst = i_hvdict # the new dict (without defaults)
2743
    else:
2744
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2745
                                              instance.hvparams)
2746
      self.hv_new = self.hv_inst = {}
2747

    
2748
    # beparams processing
2749
    if self.op.beparams:
2750
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2751
                                  use_none=True)
2752
      objects.UpgradeBeParams(i_bedict)
2753
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2754
      be_new = cluster.SimpleFillBE(i_bedict)
2755
      self.be_proposed = self.be_new = be_new # the new actual values
2756
      self.be_inst = i_bedict # the new dict (without defaults)
2757
    else:
2758
      self.be_new = self.be_inst = {}
2759
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2760
    be_old = cluster.FillBE(instance)
2761

    
2762
    # CPU param validation -- checking every time a parameter is
2763
    # changed to cover all cases where either CPU mask or vcpus have
2764
    # changed
2765
    if (constants.BE_VCPUS in self.be_proposed and
2766
        constants.HV_CPU_MASK in self.hv_proposed):
2767
      cpu_list = \
2768
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2769
      # Verify mask is consistent with number of vCPUs. Can skip this
2770
      # test if only 1 entry in the CPU mask, which means same mask
2771
      # is applied to all vCPUs.
2772
      if (len(cpu_list) > 1 and
2773
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2774
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2775
                                   " CPU mask [%s]" %
2776
                                   (self.be_proposed[constants.BE_VCPUS],
2777
                                    self.hv_proposed[constants.HV_CPU_MASK]),
2778
                                   errors.ECODE_INVAL)
2779

    
2780
      # Only perform this test if a new CPU mask is given
2781
      if constants.HV_CPU_MASK in self.hv_new:
2782
        # Calculate the largest CPU number requested
2783
        max_requested_cpu = max(map(max, cpu_list))
2784
        # Check that all of the instance's nodes have enough physical CPUs to
2785
        # satisfy the requested CPU mask
2786
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2787
                                max_requested_cpu + 1, instance.hypervisor)
2788

    
2789
    # osparams processing
2790
    if self.op.osparams:
2791
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2792
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2793
      self.os_inst = i_osdict # the new dict (without defaults)
2794
    else:
2795
      self.os_inst = {}
2796

    
2797
    #TODO(dynmem): do the appropriate check involving MINMEM
2798
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2799
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2800
      mem_check_list = [pnode]
2801
      if be_new[constants.BE_AUTO_BALANCE]:
2802
        # either we changed auto_balance to yes or it was from before
2803
        mem_check_list.extend(instance.secondary_nodes)
2804
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
2805
                                                  instance.hypervisor)
2806
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2807
                                         [instance.hypervisor], False)
2808
      pninfo = nodeinfo[pnode]
2809
      msg = pninfo.fail_msg
2810
      if msg:
2811
        # Assume the primary node is unreachable and go ahead
2812
        self.warn.append("Can't get info from primary node %s: %s" %
2813
                         (pnode, msg))
2814
      else:
2815
        (_, _, (pnhvinfo, )) = pninfo.payload
2816
        if not isinstance(pnhvinfo.get("memory_free", None), int):
2817
          self.warn.append("Node data from primary node %s doesn't contain"
2818
                           " free memory information" % pnode)
2819
        elif instance_info.fail_msg:
2820
          self.warn.append("Can't get instance runtime information: %s" %
2821
                           instance_info.fail_msg)
2822
        else:
2823
          if instance_info.payload:
2824
            current_mem = int(instance_info.payload["memory"])
2825
          else:
2826
            # Assume instance not running
2827
            # (there is a slight race condition here, but it's not very
2828
            # probable, and we have no other way to check)
2829
            # TODO: Describe race condition
2830
            current_mem = 0
2831
          #TODO(dynmem): do the appropriate check involving MINMEM
2832
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2833
                      pnhvinfo["memory_free"])
2834
          if miss_mem > 0:
2835
            raise errors.OpPrereqError("This change will prevent the instance"
2836
                                       " from starting, due to %d MB of memory"
2837
                                       " missing on its primary node" %
2838
                                       miss_mem, errors.ECODE_NORES)
2839

    
2840
      if be_new[constants.BE_AUTO_BALANCE]:
2841
        for node, nres in nodeinfo.items():
2842
          if node not in instance.secondary_nodes:
2843
            continue
2844
          nres.Raise("Can't get info from secondary node %s" % node,
2845
                     prereq=True, ecode=errors.ECODE_STATE)
2846
          (_, _, (nhvinfo, )) = nres.payload
2847
          if not isinstance(nhvinfo.get("memory_free", None), int):
2848
            raise errors.OpPrereqError("Secondary node %s didn't return free"
2849
                                       " memory information" % node,
2850
                                       errors.ECODE_STATE)
2851
          #TODO(dynmem): do the appropriate check involving MINMEM
2852
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2853
            raise errors.OpPrereqError("This change will prevent the instance"
2854
                                       " from failover to its secondary node"
2855
                                       " %s, due to not enough memory" % node,
2856
                                       errors.ECODE_STATE)
2857

    
2858
    if self.op.runtime_mem:
2859
      remote_info = self.rpc.call_instance_info(instance.primary_node,
2860
                                                instance.name,
2861
                                                instance.hypervisor)
2862
      remote_info.Raise("Error checking node %s" % instance.primary_node)
2863
      if not remote_info.payload: # not running already
2864
        raise errors.OpPrereqError("Instance %s is not running" %
2865
                                   instance.name, errors.ECODE_STATE)
2866

    
2867
      current_memory = remote_info.payload["memory"]
2868
      if (not self.op.force and
2869
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2870
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2871
        raise errors.OpPrereqError("Instance %s must have memory between %d"
2872
                                   " and %d MB of memory unless --force is"
2873
                                   " given" %
2874
                                   (instance.name,
2875
                                    self.be_proposed[constants.BE_MINMEM],
2876
                                    self.be_proposed[constants.BE_MAXMEM]),
2877
                                   errors.ECODE_INVAL)
2878

    
2879
      delta = self.op.runtime_mem - current_memory
2880
      if delta > 0:
2881
        CheckNodeFreeMemory(self, instance.primary_node,
2882
                            "ballooning memory for instance %s" %
2883
                            instance.name, delta, instance.hypervisor)
2884

    
2885
    def _PrepareNicCreate(_, params, private):
2886
      self._PrepareNicModification(params, private, None, None,
2887
                                   {}, cluster, pnode)
2888
      return (None, None)
2889

    
2890
    def _PrepareNicMod(_, nic, params, private):
2891
      self._PrepareNicModification(params, private, nic.ip, nic.network,
2892
                                   nic.nicparams, cluster, pnode)
2893
      return None
2894

    
2895
    def _PrepareNicRemove(_, params, __):
2896
      ip = params.ip
2897
      net = params.network
2898
      if net is not None and ip is not None:
2899
        self.cfg.ReleaseIp(net, ip, False, self.proc.GetECId())
2900

    
2901
    # Verify NIC changes (operating on copy)
2902
    nics = instance.nics[:]
2903
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
2904
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2905
    if len(nics) > constants.MAX_NICS:
2906
      raise errors.OpPrereqError("Instance has too many network interfaces"
2907
                                 " (%d), cannot add more" % constants.MAX_NICS,
2908
                                 errors.ECODE_STATE)
2909

    
2910
    # Pre-compute NIC changes (necessary to use result in hooks)
2911
    self._nic_chgdesc = []
2912
    if self.nicmod:
2913
      # Operate on copies as this is still in prereq
2914
      nics = [nic.Copy() for nic in instance.nics]
2915
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2916
                          self._CreateNewNic, self._ApplyNicMods, None)
2917
      # Verify that NIC names are unique and valid
2918
      utils.ValidateDeviceNames("NIC", nics)
2919
      self._new_nics = nics
2920
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
2921
    else:
2922
      self._new_nics = None
2923
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
2924

    
2925
    if not self.op.ignore_ipolicy:
2926
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2927
                                                              group_info)
2928

    
2929
      # Fill ispec with backend parameters
2930
      ispec[constants.ISPEC_SPINDLE_USE] = \
2931
        self.be_new.get(constants.BE_SPINDLE_USE, None)
2932
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
2933
                                                         None)
2934

    
2935
      # Copy ispec to verify parameters with min/max values separately
2936
      if self.op.disk_template:
2937
        new_disk_template = self.op.disk_template
2938
      else:
2939
        new_disk_template = instance.disk_template
2940
      ispec_max = ispec.copy()
2941
      ispec_max[constants.ISPEC_MEM_SIZE] = \
2942
        self.be_new.get(constants.BE_MAXMEM, None)
2943
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
2944
                                                     new_disk_template)
2945
      ispec_min = ispec.copy()
2946
      ispec_min[constants.ISPEC_MEM_SIZE] = \
2947
        self.be_new.get(constants.BE_MINMEM, None)
2948
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
2949
                                                     new_disk_template)
2950

    
2951
      if (res_max or res_min):
2952
        # FIXME: Improve error message by including information about whether
2953
        # the upper or lower limit of the parameter fails the ipolicy.
2954
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
2955
               (group_info, group_info.name,
2956
                utils.CommaJoin(set(res_max + res_min))))
2957
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2958

    
2959
  def _ConvertPlainToDrbd(self, feedback_fn):
2960
    """Converts an instance from plain to drbd.
2961

2962
    """
2963
    feedback_fn("Converting template to drbd")
2964
    instance = self.instance
2965
    pnode = instance.primary_node
2966
    snode = self.op.remote_node
2967

    
2968
    assert instance.disk_template == constants.DT_PLAIN
2969

    
2970
    # create a fake disk info for _GenerateDiskTemplate
2971
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
2972
                  constants.IDISK_VG: d.logical_id[0],
2973
                  constants.IDISK_NAME: d.name}
2974
                 for d in instance.disks]
2975
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
2976
                                     instance.name, pnode, [snode],
2977
                                     disk_info, None, None, 0, feedback_fn,
2978
                                     self.diskparams)
2979
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
2980
                                        self.diskparams)
2981
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
2982
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
2983
    info = GetInstanceInfoText(instance)
2984
    feedback_fn("Creating additional volumes...")
2985
    # first, create the missing data and meta devices
2986
    for disk in anno_disks:
2987
      # unfortunately this is... not too nice
2988
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
2989
                           info, True, p_excl_stor)
2990
      for child in disk.children:
2991
        CreateSingleBlockDev(self, snode, instance, child, info, True,
2992
                             s_excl_stor)
2993
    # at this stage, all new LVs have been created, we can rename the
2994
    # old ones
2995
    feedback_fn("Renaming original volumes...")
2996
    rename_list = [(o, n.children[0].logical_id)
2997
                   for (o, n) in zip(instance.disks, new_disks)]
2998
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
2999
    result.Raise("Failed to rename original LVs")
3000

    
3001
    feedback_fn("Initializing DRBD devices...")
3002
    # all child devices are in place, we can now create the DRBD devices
3003
    try:
3004
      for disk in anno_disks:
3005
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3006
          f_create = node == pnode
3007
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3008
                               excl_stor)
3009
    except errors.GenericError, e:
3010
      feedback_fn("Initializing of DRBD devices failed;"
3011
                  " renaming back original volumes...")
3012
      for disk in new_disks:
3013
        self.cfg.SetDiskID(disk, pnode)
3014
      rename_back_list = [(n.children[0], o.logical_id)
3015
                          for (n, o) in zip(new_disks, instance.disks)]
3016
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3017
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3018
      raise
3019

    
3020
    # at this point, the instance has been modified
3021
    instance.disk_template = constants.DT_DRBD8
3022
    instance.disks = new_disks
3023
    self.cfg.Update(instance, feedback_fn)
3024

    
3025
    # Release node locks while waiting for sync
3026
    ReleaseLocks(self, locking.LEVEL_NODE)
3027

    
3028
    # disks are created, waiting for sync
3029
    disk_abort = not WaitForSync(self, instance,
3030
                                 oneshot=not self.op.wait_for_sync)
3031
    if disk_abort:
3032
      raise errors.OpExecError("There are some degraded disks for"
3033
                               " this instance, please cleanup manually")
3034

    
3035
    # Node resource locks will be released by caller
3036

    
3037
  def _ConvertDrbdToPlain(self, feedback_fn):
3038
    """Converts an instance from drbd to plain.
3039

3040
    """
3041
    instance = self.instance
3042

    
3043
    assert len(instance.secondary_nodes) == 1
3044
    assert instance.disk_template == constants.DT_DRBD8
3045

    
3046
    pnode = instance.primary_node
3047
    snode = instance.secondary_nodes[0]
3048
    feedback_fn("Converting template to plain")
3049

    
3050
    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3051
    new_disks = [d.children[0] for d in instance.disks]
3052

    
3053
    # copy over size, mode and name
3054
    for parent, child in zip(old_disks, new_disks):
3055
      child.size = parent.size
3056
      child.mode = parent.mode
3057
      child.name = parent.name
3058

    
3059
    # this is a DRBD disk, return its port to the pool
3060
    # NOTE: this must be done right before the call to cfg.Update!
3061
    for disk in old_disks:
3062
      tcp_port = disk.logical_id[2]
3063
      self.cfg.AddTcpUdpPort(tcp_port)
3064

    
3065
    # update instance structure
3066
    instance.disks = new_disks
3067
    instance.disk_template = constants.DT_PLAIN
3068
    _UpdateIvNames(0, instance.disks)
3069
    self.cfg.Update(instance, feedback_fn)
3070

    
3071
    # Release locks in case removing disks takes a while
3072
    ReleaseLocks(self, locking.LEVEL_NODE)
3073

    
3074
    feedback_fn("Removing volumes on the secondary node...")
3075
    for disk in old_disks:
3076
      self.cfg.SetDiskID(disk, snode)
3077
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3078
      if msg:
3079
        self.LogWarning("Could not remove block device %s on node %s,"
3080
                        " continuing anyway: %s", disk.iv_name, snode, msg)
3081

    
3082
    feedback_fn("Removing unneeded volumes on the primary node...")
3083
    for idx, disk in enumerate(old_disks):
3084
      meta = disk.children[1]
3085
      self.cfg.SetDiskID(meta, pnode)
3086
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3087
      if msg:
3088
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
3089
                        " continuing anyway: %s", idx, pnode, msg)
3090

    
3091
  def _CreateNewDisk(self, idx, params, _):
3092
    """Creates a new disk.
3093

3094
    """
3095
    instance = self.instance
3096

    
3097
    # add a new disk
3098
    if instance.disk_template in constants.DTS_FILEBASED:
3099
      (file_driver, file_path) = instance.disks[0].logical_id
3100
      file_path = os.path.dirname(file_path)
3101
    else:
3102
      file_driver = file_path = None
3103

    
3104
    disk = \
3105
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
3106
                           instance.primary_node, instance.secondary_nodes,
3107
                           [params], file_path, file_driver, idx,
3108
                           self.Log, self.diskparams)[0]
3109

    
3110
    new_disks = CreateDisks(self, instance, disks=[disk])
3111

    
3112
    if self.cluster.prealloc_wipe_disks:
3113
      # Wipe new disk
3114
      WipeOrCleanupDisks(self, instance,
3115
                         disks=[(idx, disk, 0)],
3116
                         cleanup=new_disks)
3117

    
3118
    return (disk, [
3119
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3120
      ])
3121

    
3122
  @staticmethod
3123
  def _ModifyDisk(idx, disk, params, _):
3124
    """Modifies a disk.
3125

3126
    """
3127
    changes = []
3128
    mode = params.get(constants.IDISK_MODE, None)
3129
    if mode:
3130
      disk.mode = mode
3131
      changes.append(("disk.mode/%d" % idx, disk.mode))
3132

    
3133
    name = params.get(constants.IDISK_NAME, None)
3134
    disk.name = name
3135
    changes.append(("disk.name/%d" % idx, disk.name))
3136

    
3137
    return changes
3138

    
3139
  def _RemoveDisk(self, idx, root, _):
3140
    """Removes a disk.
3141

3142
    """
3143
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3144
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3145
      self.cfg.SetDiskID(disk, node)
3146
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3147
      if msg:
3148
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3149
                        " continuing anyway", idx, node, msg)
3150

    
3151
    # if this is a DRBD disk, return its port to the pool
3152
    if root.dev_type in constants.LDS_DRBD:
3153
      self.cfg.AddTcpUdpPort(root.logical_id[2])
3154

    
3155
  def _CreateNewNic(self, idx, params, private):
3156
    """Creates data structure for a new network interface.
3157

3158
    """
3159
    mac = params[constants.INIC_MAC]
3160
    ip = params.get(constants.INIC_IP, None)
3161
    net = params.get(constants.INIC_NETWORK, None)
3162
    name = params.get(constants.INIC_NAME, None)
3163
    net_uuid = self.cfg.LookupNetwork(net)
3164
    #TODO: not private.filled?? can a nic have no nicparams??
3165
    nicparams = private.filled
3166
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3167
                       nicparams=nicparams)
3168
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3169

    
3170
    return (nobj, [
3171
      ("nic.%d" % idx,
3172
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3173
       (mac, ip, private.filled[constants.NIC_MODE],
3174
       private.filled[constants.NIC_LINK],
3175
       net)),
3176
      ])
3177

    
3178
  def _ApplyNicMods(self, idx, nic, params, private):
3179
    """Modifies a network interface.
3180

3181
    """
3182
    changes = []
3183

    
3184
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3185
      if key in params:
3186
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3187
        setattr(nic, key, params[key])
3188

    
3189
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3190
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3191
    if new_net_uuid != nic.network:
3192
      changes.append(("nic.network/%d" % idx, new_net))
3193
      nic.network = new_net_uuid
3194

    
3195
    if private.filled:
3196
      nic.nicparams = private.filled
3197

    
3198
      for (key, val) in nic.nicparams.items():
3199
        changes.append(("nic.%s/%d" % (key, idx), val))
3200

    
3201
    return changes
3202

    
3203
  def Exec(self, feedback_fn):
3204
    """Modifies an instance.
3205

3206
    All parameters take effect only at the next restart of the instance.
3207

3208
    """
3209
    # Process here the warnings from CheckPrereq, as we don't have a
3210
    # feedback_fn there.
3211
    # TODO: Replace with self.LogWarning
3212
    for warn in self.warn:
3213
      feedback_fn("WARNING: %s" % warn)
3214

    
3215
    assert ((self.op.disk_template is None) ^
3216
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3217
      "Not owning any node resource locks"
3218

    
3219
    result = []
3220
    instance = self.instance
3221

    
3222
    # New primary node
3223
    if self.op.pnode:
3224
      instance.primary_node = self.op.pnode
3225

    
3226
    # runtime memory
3227
    if self.op.runtime_mem:
3228
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3229
                                                     instance,
3230
                                                     self.op.runtime_mem)
3231
      rpcres.Raise("Cannot modify instance runtime memory")
3232
      result.append(("runtime_memory", self.op.runtime_mem))
3233

    
3234
    # Apply disk changes
3235
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3236
                        self._CreateNewDisk, self._ModifyDisk,
3237
                        self._RemoveDisk)
3238
    _UpdateIvNames(0, instance.disks)
3239

    
3240
    if self.op.disk_template:
3241
      if __debug__:
3242
        check_nodes = set(instance.all_nodes)
3243
        if self.op.remote_node:
3244
          check_nodes.add(self.op.remote_node)
3245
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3246
          owned = self.owned_locks(level)
3247
          assert not (check_nodes - owned), \
3248
            ("Not owning the correct locks, owning %r, expected at least %r" %
3249
             (owned, check_nodes))
3250

    
3251
      r_shut = ShutdownInstanceDisks(self, instance)
3252
      if not r_shut:
3253
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3254
                                 " proceed with disk template conversion")
3255
      mode = (instance.disk_template, self.op.disk_template)
3256
      try:
3257
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
3258
      except:
3259
        self.cfg.ReleaseDRBDMinors(instance.name)
3260
        raise
3261
      result.append(("disk_template", self.op.disk_template))
3262

    
3263
      assert instance.disk_template == self.op.disk_template, \
3264
        ("Expected disk template '%s', found '%s'" %
3265
         (self.op.disk_template, instance.disk_template))
3266

    
3267
    # Release node and resource locks if there are any (they might already have
3268
    # been released during disk conversion)
3269
    ReleaseLocks(self, locking.LEVEL_NODE)
3270
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
3271

    
3272
    # Apply NIC changes
3273
    if self._new_nics is not None:
3274
      instance.nics = self._new_nics
3275
      result.extend(self._nic_chgdesc)
3276

    
3277
    # hvparams changes
3278
    if self.op.hvparams:
3279
      instance.hvparams = self.hv_inst
3280
      for key, val in self.op.hvparams.iteritems():
3281
        result.append(("hv/%s" % key, val))
3282

    
3283
    # beparams changes
3284
    if self.op.beparams:
3285
      instance.beparams = self.be_inst
3286
      for key, val in self.op.beparams.iteritems():
3287
        result.append(("be/%s" % key, val))
3288

    
3289
    # OS change
3290
    if self.op.os_name:
3291
      instance.os = self.op.os_name
3292

    
3293
    # osparams changes
3294
    if self.op.osparams:
3295
      instance.osparams = self.os_inst
3296
      for key, val in self.op.osparams.iteritems():
3297
        result.append(("os/%s" % key, val))
3298

    
3299
    if self.op.offline is None:
3300
      # Ignore
3301
      pass
3302
    elif self.op.offline:
3303
      # Mark instance as offline
3304
      self.cfg.MarkInstanceOffline(instance.name)
3305
      result.append(("admin_state", constants.ADMINST_OFFLINE))
3306
    else:
3307
      # Mark instance as online, but stopped
3308
      self.cfg.MarkInstanceDown(instance.name)
3309
      result.append(("admin_state", constants.ADMINST_DOWN))
3310

    
3311
    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3312

    
3313
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3314
                self.owned_locks(locking.LEVEL_NODE)), \
3315
      "All node locks should have been released by now"
3316

    
3317
    return result
3318

    
3319
  _DISK_CONVERSIONS = {
3320
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3321
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3322
    }
3323

    
3324

    
3325
class LUInstanceChangeGroup(LogicalUnit):
3326
  HPATH = "instance-change-group"
3327
  HTYPE = constants.HTYPE_INSTANCE
3328
  REQ_BGL = False
3329

    
3330
  def ExpandNames(self):
3331
    self.share_locks = ShareAll()
3332

    
3333
    self.needed_locks = {
3334
      locking.LEVEL_NODEGROUP: [],
3335
      locking.LEVEL_NODE: [],
3336
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3337
      }
3338

    
3339
    self._ExpandAndLockInstance()
3340

    
3341
    if self.op.target_groups:
3342
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3343
                                  self.op.target_groups)
3344
    else:
3345
      self.req_target_uuids = None
3346

    
3347
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3348

    
3349
  def DeclareLocks(self, level):
3350
    if level == locking.LEVEL_NODEGROUP:
3351
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3352

    
3353
      if self.req_target_uuids:
3354
        lock_groups = set(self.req_target_uuids)
3355

    
3356
        # Lock all groups used by instance optimistically; this requires going
3357
        # via the node before it's locked, requiring verification later on
3358
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3359
        lock_groups.update(instance_groups)
3360
      else:
3361
        # No target groups, need to lock all of them
3362
        lock_groups = locking.ALL_SET
3363

    
3364
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3365

    
3366
    elif level == locking.LEVEL_NODE:
3367
      if self.req_target_uuids:
3368
        # Lock all nodes used by instances
3369
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3370
        self._LockInstancesNodes()
3371

    
3372
        # Lock all nodes in all potential target groups
3373
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3374
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3375
        member_nodes = [node_name
3376
                        for group in lock_groups
3377
                        for node_name in self.cfg.GetNodeGroup(group).members]
3378
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3379
      else:
3380
        # Lock all nodes as all groups are potential targets
3381
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3382

    
3383
  def CheckPrereq(self):
3384
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3385
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3386
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3387

    
3388
    assert (self.req_target_uuids is None or
3389
            owned_groups.issuperset(self.req_target_uuids))
3390
    assert owned_instances == set([self.op.instance_name])
3391

    
3392
    # Get instance information
3393
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3394

    
3395
    # Check if node groups for locked instance are still correct
3396
    assert owned_nodes.issuperset(self.instance.all_nodes), \
3397
      ("Instance %s's nodes changed while we kept the lock" %
3398
       self.op.instance_name)
3399

    
3400
    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3401
                                          owned_groups)
3402

    
3403
    if self.req_target_uuids:
3404
      # User requested specific target groups
3405
      self.target_uuids = frozenset(self.req_target_uuids)
3406
    else:
3407
      # All groups except those used by the instance are potential targets
3408
      self.target_uuids = owned_groups - inst_groups
3409

    
3410
    conflicting_groups = self.target_uuids & inst_groups
3411
    if conflicting_groups:
3412
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3413
                                 " used by the instance '%s'" %
3414
                                 (utils.CommaJoin(conflicting_groups),
3415
                                  self.op.instance_name),
3416
                                 errors.ECODE_INVAL)
3417

    
3418
    if not self.target_uuids:
3419
      raise errors.OpPrereqError("There are no possible target groups",
3420
                                 errors.ECODE_INVAL)
3421

    
3422
  def BuildHooksEnv(self):
3423
    """Build hooks env.
3424

3425
    """
3426
    assert self.target_uuids
3427

    
3428
    env = {
3429
      "TARGET_GROUPS": " ".join(self.target_uuids),
3430
      }
3431

    
3432
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
3433

    
3434
    return env
3435

    
3436
  def BuildHooksNodes(self):
3437
    """Build hooks nodes.
3438

3439
    """
3440
    mn = self.cfg.GetMasterNode()
3441
    return ([mn], [mn])
3442

    
3443
  def Exec(self, feedback_fn):
3444
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3445

    
3446
    assert instances == [self.op.instance_name], "Instance not locked"
3447

    
3448
    req = iallocator.IAReqGroupChange(instances=instances,
3449
                                      target_groups=list(self.target_uuids))
3450
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3451

    
3452
    ial.Run(self.op.iallocator)
3453

    
3454
    if not ial.success:
3455
      raise errors.OpPrereqError("Can't compute solution for changing group of"
3456
                                 " instance '%s' using iallocator '%s': %s" %
3457
                                 (self.op.instance_name, self.op.iallocator,
3458
                                  ial.info), errors.ECODE_NORES)
3459

    
3460
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3461

    
3462
    self.LogInfo("Iallocator returned %s job(s) for changing group of"
3463
                 " instance '%s'", len(jobs), self.op.instance_name)
3464

    
3465
    return ResultWithJobs(jobs)