Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / instance.py @ c7dd65be

History | View | Annotate | Download (134.8 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with instances."""
23

    
24
import OpenSSL
25
import copy
26
import logging
27
import os
28

    
29
from ganeti import compat
30
from ganeti import constants
31
from ganeti import errors
32
from ganeti import ht
33
from ganeti import hypervisor
34
from ganeti import locking
35
from ganeti.masterd import iallocator
36
from ganeti import masterd
37
from ganeti import netutils
38
from ganeti import objects
39
from ganeti import opcodes
40
from ganeti import pathutils
41
from ganeti import rpc
42
from ganeti import utils
43

    
44
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45

    
46
from ganeti.cmdlib.common import INSTANCE_DOWN, \
47
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
53
from ganeti.cmdlib.instance_storage import CreateDisks, \
54
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
56
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
58
  CheckSpindlesExclusiveStorage
59
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
60
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
61
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
62
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
63
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
64
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
65

    
66
import ganeti.masterd.instance
67

    
68

    
69
#: Type description for changes as returned by L{_ApplyContainerMods}'s
70
#: callbacks
71
_TApplyContModsCbChanges = \
72
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
73
    ht.TNonEmptyString,
74
    ht.TAny,
75
    ])))
76

    
77

    
78
def _CheckHostnameSane(lu, name):
79
  """Ensures that a given hostname resolves to a 'sane' name.
80

81
  The given name is required to be a prefix of the resolved hostname,
82
  to prevent accidental mismatches.
83

84
  @param lu: the logical unit on behalf of which we're checking
85
  @param name: the name we should resolve and check
86
  @return: the resolved hostname object
87

88
  """
89
  hostname = netutils.GetHostname(name=name)
90
  if hostname.name != name:
91
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
92
  if not utils.MatchNameComponent(name, [hostname.name]):
93
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
94
                                " same as given hostname '%s'") %
95
                               (hostname.name, name), errors.ECODE_INVAL)
96
  return hostname
97

    
98

    
99
def _CheckOpportunisticLocking(op):
100
  """Generate error if opportunistic locking is not possible.
101

102
  """
103
  if op.opportunistic_locking and not op.iallocator:
104
    raise errors.OpPrereqError("Opportunistic locking is only available in"
105
                               " combination with an instance allocator",
106
                               errors.ECODE_INVAL)
107

    
108

    
109
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
110
  """Wrapper around IAReqInstanceAlloc.
111

112
  @param op: The instance opcode
113
  @param disks: The computed disks
114
  @param nics: The computed nics
115
  @param beparams: The full filled beparams
116
  @param node_whitelist: List of nodes which should appear as online to the
117
    allocator (unless the node is already marked offline)
118

119
  @returns: A filled L{iallocator.IAReqInstanceAlloc}
120

121
  """
122
  spindle_use = beparams[constants.BE_SPINDLE_USE]
123
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
124
                                       disk_template=op.disk_template,
125
                                       tags=op.tags,
126
                                       os=op.os_type,
127
                                       vcpus=beparams[constants.BE_VCPUS],
128
                                       memory=beparams[constants.BE_MAXMEM],
129
                                       spindle_use=spindle_use,
130
                                       disks=disks,
131
                                       nics=[n.ToDict() for n in nics],
132
                                       hypervisor=op.hypervisor,
133
                                       node_whitelist=node_whitelist)
134

    
135

    
136
def _ComputeFullBeParams(op, cluster):
137
  """Computes the full beparams.
138

139
  @param op: The instance opcode
140
  @param cluster: The cluster config object
141

142
  @return: The fully filled beparams
143

144
  """
145
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
146
  for param, value in op.beparams.iteritems():
147
    if value == constants.VALUE_AUTO:
148
      op.beparams[param] = default_beparams[param]
149
  objects.UpgradeBeParams(op.beparams)
150
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
151
  return cluster.SimpleFillBE(op.beparams)
152

    
153

    
154
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
155
  """Computes the nics.
156

157
  @param op: The instance opcode
158
  @param cluster: Cluster configuration object
159
  @param default_ip: The default ip to assign
160
  @param cfg: An instance of the configuration object
161
  @param ec_id: Execution context ID
162

163
  @returns: The build up nics
164

165
  """
166
  nics = []
167
  for nic in op.nics:
168
    nic_mode_req = nic.get(constants.INIC_MODE, None)
169
    nic_mode = nic_mode_req
170
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
171
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
172

    
173
    net = nic.get(constants.INIC_NETWORK, None)
174
    link = nic.get(constants.NIC_LINK, None)
175
    ip = nic.get(constants.INIC_IP, None)
176

    
177
    if net is None or net.lower() == constants.VALUE_NONE:
178
      net = None
179
    else:
180
      if nic_mode_req is not None or link is not None:
181
        raise errors.OpPrereqError("If network is given, no mode or link"
182
                                   " is allowed to be passed",
183
                                   errors.ECODE_INVAL)
184

    
185
    # ip validity checks
186
    if ip is None or ip.lower() == constants.VALUE_NONE:
187
      nic_ip = None
188
    elif ip.lower() == constants.VALUE_AUTO:
189
      if not op.name_check:
190
        raise errors.OpPrereqError("IP address set to auto but name checks"
191
                                   " have been skipped",
192
                                   errors.ECODE_INVAL)
193
      nic_ip = default_ip
194
    else:
195
      # We defer pool operations until later, so that the iallocator has
196
      # filled in the instance's node(s) dimara
197
      if ip.lower() == constants.NIC_IP_POOL:
198
        if net is None:
199
          raise errors.OpPrereqError("if ip=pool, parameter network"
200
                                     " must be passed too",
201
                                     errors.ECODE_INVAL)
202

    
203
      elif not netutils.IPAddress.IsValid(ip):
204
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
205
                                   errors.ECODE_INVAL)
206

    
207
      nic_ip = ip
208

    
209
    # TODO: check the ip address for uniqueness
210
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
211
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
212
                                 errors.ECODE_INVAL)
213

    
214
    # MAC address verification
215
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
216
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
217
      mac = utils.NormalizeAndValidateMac(mac)
218

    
219
      try:
220
        # TODO: We need to factor this out
221
        cfg.ReserveMAC(mac, ec_id)
222
      except errors.ReservationError:
223
        raise errors.OpPrereqError("MAC address %s already in use"
224
                                   " in cluster" % mac,
225
                                   errors.ECODE_NOTUNIQUE)
226

    
227
    #  Build nic parameters
228
    nicparams = {}
229
    if nic_mode_req:
230
      nicparams[constants.NIC_MODE] = nic_mode
231
    if link:
232
      nicparams[constants.NIC_LINK] = link
233

    
234
    check_params = cluster.SimpleFillNIC(nicparams)
235
    objects.NIC.CheckParameterSyntax(check_params)
236
    net_uuid = cfg.LookupNetwork(net)
237
    name = nic.get(constants.INIC_NAME, None)
238
    if name is not None and name.lower() == constants.VALUE_NONE:
239
      name = None
240
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
241
                          network=net_uuid, nicparams=nicparams)
242
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
243
    nics.append(nic_obj)
244

    
245
  return nics
246

    
247

    
248
def _CheckForConflictingIp(lu, ip, node):
249
  """In case of conflicting IP address raise error.
250

251
  @type ip: string
252
  @param ip: IP address
253
  @type node: string
254
  @param node: node name
255

256
  """
257
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
258
  if conf_net is not None:
259
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
260
                                " network %s, but the target NIC does not." %
261
                                (ip, conf_net)),
262
                               errors.ECODE_STATE)
263

    
264
  return (None, None)
265

    
266

    
267
def _ComputeIPolicyInstanceSpecViolation(
268
  ipolicy, instance_spec, disk_template,
269
  _compute_fn=ComputeIPolicySpecViolation):
270
  """Compute if instance specs meets the specs of ipolicy.
271

272
  @type ipolicy: dict
273
  @param ipolicy: The ipolicy to verify against
274
  @param instance_spec: dict
275
  @param instance_spec: The instance spec to verify
276
  @type disk_template: string
277
  @param disk_template: the disk template of the instance
278
  @param _compute_fn: The function to verify ipolicy (unittest only)
279
  @see: L{ComputeIPolicySpecViolation}
280

281
  """
282
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
283
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
284
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
285
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
286
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
287
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
288

    
289
  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
290
                     disk_sizes, spindle_use, disk_template)
291

    
292

    
293
def _CheckOSVariant(os_obj, name):
294
  """Check whether an OS name conforms to the os variants specification.
295

296
  @type os_obj: L{objects.OS}
297
  @param os_obj: OS object to check
298
  @type name: string
299
  @param name: OS name passed by the user, to check for validity
300

301
  """
302
  variant = objects.OS.GetVariant(name)
303
  if not os_obj.supported_variants:
304
    if variant:
305
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
306
                                 " passed)" % (os_obj.name, variant),
307
                                 errors.ECODE_INVAL)
308
    return
309
  if not variant:
310
    raise errors.OpPrereqError("OS name must include a variant",
311
                               errors.ECODE_INVAL)
312

    
313
  if variant not in os_obj.supported_variants:
314
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
315

    
316

    
317
class LUInstanceCreate(LogicalUnit):
318
  """Create an instance.
319

320
  """
321
  HPATH = "instance-add"
322
  HTYPE = constants.HTYPE_INSTANCE
323
  REQ_BGL = False
324

    
325
  def CheckArguments(self):
326
    """Check arguments.
327

328
    """
329
    # do not require name_check to ease forward/backward compatibility
330
    # for tools
331
    if self.op.no_install and self.op.start:
332
      self.LogInfo("No-installation mode selected, disabling startup")
333
      self.op.start = False
334
    # validate/normalize the instance name
335
    self.op.instance_name = \
336
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
337

    
338
    if self.op.ip_check and not self.op.name_check:
339
      # TODO: make the ip check more flexible and not depend on the name check
340
      raise errors.OpPrereqError("Cannot do IP address check without a name"
341
                                 " check", errors.ECODE_INVAL)
342

    
343
    # check nics' parameter names
344
    for nic in self.op.nics:
345
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
346
    # check that NIC's parameters names are unique and valid
347
    utils.ValidateDeviceNames("NIC", self.op.nics)
348

    
349
    # check that disk's names are unique and valid
350
    utils.ValidateDeviceNames("disk", self.op.disks)
351

    
352
    cluster = self.cfg.GetClusterInfo()
353
    if not self.op.disk_template in cluster.enabled_disk_templates:
354
      raise errors.OpPrereqError("Cannot create an instance with disk template"
355
                                 " '%s', because it is not enabled in the"
356
                                 " cluster. Enabled disk templates are: %s." %
357
                                 (self.op.disk_template,
358
                                  ",".join(cluster.enabled_disk_templates)))
359

    
360
    # check disks. parameter names and consistent adopt/no-adopt strategy
361
    has_adopt = has_no_adopt = False
362
    for disk in self.op.disks:
363
      if self.op.disk_template != constants.DT_EXT:
364
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
365
      if constants.IDISK_ADOPT in disk:
366
        has_adopt = True
367
      else:
368
        has_no_adopt = True
369
    if has_adopt and has_no_adopt:
370
      raise errors.OpPrereqError("Either all disks are adopted or none is",
371
                                 errors.ECODE_INVAL)
372
    if has_adopt:
373
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
374
        raise errors.OpPrereqError("Disk adoption is not supported for the"
375
                                   " '%s' disk template" %
376
                                   self.op.disk_template,
377
                                   errors.ECODE_INVAL)
378
      if self.op.iallocator is not None:
379
        raise errors.OpPrereqError("Disk adoption not allowed with an"
380
                                   " iallocator script", errors.ECODE_INVAL)
381
      if self.op.mode == constants.INSTANCE_IMPORT:
382
        raise errors.OpPrereqError("Disk adoption not allowed for"
383
                                   " instance import", errors.ECODE_INVAL)
384
    else:
385
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
386
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
387
                                   " but no 'adopt' parameter given" %
388
                                   self.op.disk_template,
389
                                   errors.ECODE_INVAL)
390

    
391
    self.adopt_disks = has_adopt
392

    
393
    # instance name verification
394
    if self.op.name_check:
395
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
396
      self.op.instance_name = self.hostname1.name
397
      # used in CheckPrereq for ip ping check
398
      self.check_ip = self.hostname1.ip
399
    else:
400
      self.check_ip = None
401

    
402
    # file storage checks
403
    if (self.op.file_driver and
404
        not self.op.file_driver in constants.FILE_DRIVER):
405
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
406
                                 self.op.file_driver, errors.ECODE_INVAL)
407

    
408
    if self.op.disk_template == constants.DT_FILE:
409
      opcodes.RequireFileStorage()
410
    elif self.op.disk_template == constants.DT_SHARED_FILE:
411
      opcodes.RequireSharedFileStorage()
412

    
413
    ### Node/iallocator related checks
414
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
415

    
416
    if self.op.pnode is not None:
417
      if self.op.disk_template in constants.DTS_INT_MIRROR:
418
        if self.op.snode is None:
419
          raise errors.OpPrereqError("The networked disk templates need"
420
                                     " a mirror node", errors.ECODE_INVAL)
421
      elif self.op.snode:
422
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
423
                        " template")
424
        self.op.snode = None
425

    
426
    _CheckOpportunisticLocking(self.op)
427

    
428
    self._cds = GetClusterDomainSecret()
429

    
430
    if self.op.mode == constants.INSTANCE_IMPORT:
431
      # On import force_variant must be True, because if we forced it at
432
      # initial install, our only chance when importing it back is that it
433
      # works again!
434
      self.op.force_variant = True
435

    
436
      if self.op.no_install:
437
        self.LogInfo("No-installation mode has no effect during import")
438

    
439
    elif self.op.mode == constants.INSTANCE_CREATE:
440
      if self.op.os_type is None:
441
        raise errors.OpPrereqError("No guest OS specified",
442
                                   errors.ECODE_INVAL)
443
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
444
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
445
                                   " installation" % self.op.os_type,
446
                                   errors.ECODE_STATE)
447
      if self.op.disk_template is None:
448
        raise errors.OpPrereqError("No disk template specified",
449
                                   errors.ECODE_INVAL)
450

    
451
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
452
      # Check handshake to ensure both clusters have the same domain secret
453
      src_handshake = self.op.source_handshake
454
      if not src_handshake:
455
        raise errors.OpPrereqError("Missing source handshake",
456
                                   errors.ECODE_INVAL)
457

    
458
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
459
                                                           src_handshake)
460
      if errmsg:
461
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
462
                                   errors.ECODE_INVAL)
463

    
464
      # Load and check source CA
465
      self.source_x509_ca_pem = self.op.source_x509_ca
466
      if not self.source_x509_ca_pem:
467
        raise errors.OpPrereqError("Missing source X509 CA",
468
                                   errors.ECODE_INVAL)
469

    
470
      try:
471
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
472
                                                    self._cds)
473
      except OpenSSL.crypto.Error, err:
474
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
475
                                   (err, ), errors.ECODE_INVAL)
476

    
477
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
478
      if errcode is not None:
479
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
480
                                   errors.ECODE_INVAL)
481

    
482
      self.source_x509_ca = cert
483

    
484
      src_instance_name = self.op.source_instance_name
485
      if not src_instance_name:
486
        raise errors.OpPrereqError("Missing source instance name",
487
                                   errors.ECODE_INVAL)
488

    
489
      self.source_instance_name = \
490
        netutils.GetHostname(name=src_instance_name).name
491

    
492
    else:
493
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
494
                                 self.op.mode, errors.ECODE_INVAL)
495

    
496
  def ExpandNames(self):
497
    """ExpandNames for CreateInstance.
498

499
    Figure out the right locks for instance creation.
500

501
    """
502
    self.needed_locks = {}
503

    
504
    instance_name = self.op.instance_name
505
    # this is just a preventive check, but someone might still add this
506
    # instance in the meantime, and creation will fail at lock-add time
507
    if instance_name in self.cfg.GetInstanceList():
508
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
509
                                 instance_name, errors.ECODE_EXISTS)
510

    
511
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
512

    
513
    if self.op.iallocator:
514
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
515
      # specifying a group on instance creation and then selecting nodes from
516
      # that group
517
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
518
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
519

    
520
      if self.op.opportunistic_locking:
521
        self.opportunistic_locks[locking.LEVEL_NODE] = True
522
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
523
    else:
524
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
525
      nodelist = [self.op.pnode]
526
      if self.op.snode is not None:
527
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
528
        nodelist.append(self.op.snode)
529
      self.needed_locks[locking.LEVEL_NODE] = nodelist
530

    
531
    # in case of import lock the source node too
532
    if self.op.mode == constants.INSTANCE_IMPORT:
533
      src_node = self.op.src_node
534
      src_path = self.op.src_path
535

    
536
      if src_path is None:
537
        self.op.src_path = src_path = self.op.instance_name
538

    
539
      if src_node is None:
540
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
541
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
542
        self.op.src_node = None
543
        if os.path.isabs(src_path):
544
          raise errors.OpPrereqError("Importing an instance from a path"
545
                                     " requires a source node option",
546
                                     errors.ECODE_INVAL)
547
      else:
548
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
549
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
550
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
551
        if not os.path.isabs(src_path):
552
          self.op.src_path = src_path = \
553
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
554

    
555
    self.needed_locks[locking.LEVEL_NODE_RES] = \
556
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
557

    
558
  def _RunAllocator(self):
559
    """Run the allocator based on input opcode.
560

561
    """
562
    if self.op.opportunistic_locking:
563
      # Only consider nodes for which a lock is held
564
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
565
    else:
566
      node_whitelist = None
567

    
568
    #TODO Export network to iallocator so that it chooses a pnode
569
    #     in a nodegroup that has the desired network connected to
570
    req = _CreateInstanceAllocRequest(self.op, self.disks,
571
                                      self.nics, self.be_full,
572
                                      node_whitelist)
573
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
574

    
575
    ial.Run(self.op.iallocator)
576

    
577
    if not ial.success:
578
      # When opportunistic locks are used only a temporary failure is generated
579
      if self.op.opportunistic_locking:
580
        ecode = errors.ECODE_TEMP_NORES
581
      else:
582
        ecode = errors.ECODE_NORES
583

    
584
      raise errors.OpPrereqError("Can't compute nodes using"
585
                                 " iallocator '%s': %s" %
586
                                 (self.op.iallocator, ial.info),
587
                                 ecode)
588

    
589
    self.op.pnode = ial.result[0]
590
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
591
                 self.op.instance_name, self.op.iallocator,
592
                 utils.CommaJoin(ial.result))
593

    
594
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
595

    
596
    if req.RequiredNodes() == 2:
597
      self.op.snode = ial.result[1]
598

    
599
  def BuildHooksEnv(self):
600
    """Build hooks env.
601

602
    This runs on master, primary and secondary nodes of the instance.
603

604
    """
605
    env = {
606
      "ADD_MODE": self.op.mode,
607
      }
608
    if self.op.mode == constants.INSTANCE_IMPORT:
609
      env["SRC_NODE"] = self.op.src_node
610
      env["SRC_PATH"] = self.op.src_path
611
      env["SRC_IMAGES"] = self.src_images
612

    
613
    env.update(BuildInstanceHookEnv(
614
      name=self.op.instance_name,
615
      primary_node=self.op.pnode,
616
      secondary_nodes=self.secondaries,
617
      status=self.op.start,
618
      os_type=self.op.os_type,
619
      minmem=self.be_full[constants.BE_MINMEM],
620
      maxmem=self.be_full[constants.BE_MAXMEM],
621
      vcpus=self.be_full[constants.BE_VCPUS],
622
      nics=NICListToTuple(self, self.nics),
623
      disk_template=self.op.disk_template,
624
      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
625
              d[constants.IDISK_MODE]) for d in self.disks],
626
      bep=self.be_full,
627
      hvp=self.hv_full,
628
      hypervisor_name=self.op.hypervisor,
629
      tags=self.op.tags,
630
      ))
631

    
632
    return env
633

    
634
  def BuildHooksNodes(self):
635
    """Build hooks nodes.
636

637
    """
638
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
639
    return nl, nl
640

    
641
  def _ReadExportInfo(self):
642
    """Reads the export information from disk.
643

644
    It will override the opcode source node and path with the actual
645
    information, if these two were not specified before.
646

647
    @return: the export information
648

649
    """
650
    assert self.op.mode == constants.INSTANCE_IMPORT
651

    
652
    src_node = self.op.src_node
653
    src_path = self.op.src_path
654

    
655
    if src_node is None:
656
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
657
      exp_list = self.rpc.call_export_list(locked_nodes)
658
      found = False
659
      for node in exp_list:
660
        if exp_list[node].fail_msg:
661
          continue
662
        if src_path in exp_list[node].payload:
663
          found = True
664
          self.op.src_node = src_node = node
665
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
666
                                                       src_path)
667
          break
668
      if not found:
669
        raise errors.OpPrereqError("No export found for relative path %s" %
670
                                   src_path, errors.ECODE_INVAL)
671

    
672
    CheckNodeOnline(self, src_node)
673
    result = self.rpc.call_export_info(src_node, src_path)
674
    result.Raise("No export or invalid export found in dir %s" % src_path)
675

    
676
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
677
    if not export_info.has_section(constants.INISECT_EXP):
678
      raise errors.ProgrammerError("Corrupted export config",
679
                                   errors.ECODE_ENVIRON)
680

    
681
    ei_version = export_info.get(constants.INISECT_EXP, "version")
682
    if (int(ei_version) != constants.EXPORT_VERSION):
683
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
684
                                 (ei_version, constants.EXPORT_VERSION),
685
                                 errors.ECODE_ENVIRON)
686
    return export_info
687

    
688
  def _ReadExportParams(self, einfo):
689
    """Use export parameters as defaults.
690

691
    In case the opcode doesn't specify (as in override) some instance
692
    parameters, then try to use them from the export information, if
693
    that declares them.
694

695
    """
696
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
697

    
698
    if self.op.disk_template is None:
699
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
700
        self.op.disk_template = einfo.get(constants.INISECT_INS,
701
                                          "disk_template")
702
        if self.op.disk_template not in constants.DISK_TEMPLATES:
703
          raise errors.OpPrereqError("Disk template specified in configuration"
704
                                     " file is not one of the allowed values:"
705
                                     " %s" %
706
                                     " ".join(constants.DISK_TEMPLATES),
707
                                     errors.ECODE_INVAL)
708
      else:
709
        raise errors.OpPrereqError("No disk template specified and the export"
710
                                   " is missing the disk_template information",
711
                                   errors.ECODE_INVAL)
712

    
713
    if not self.op.disks:
714
      disks = []
715
      # TODO: import the disk iv_name too
716
      for idx in range(constants.MAX_DISKS):
717
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
718
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
719
          disks.append({constants.IDISK_SIZE: disk_sz})
720
      self.op.disks = disks
721
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
722
        raise errors.OpPrereqError("No disk info specified and the export"
723
                                   " is missing the disk information",
724
                                   errors.ECODE_INVAL)
725

    
726
    if not self.op.nics:
727
      nics = []
728
      for idx in range(constants.MAX_NICS):
729
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
730
          ndict = {}
731
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
732
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
733
            ndict[name] = v
734
          nics.append(ndict)
735
        else:
736
          break
737
      self.op.nics = nics
738

    
739
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
740
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
741

    
742
    if (self.op.hypervisor is None and
743
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
744
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
745

    
746
    if einfo.has_section(constants.INISECT_HYP):
747
      # use the export parameters but do not override the ones
748
      # specified by the user
749
      for name, value in einfo.items(constants.INISECT_HYP):
750
        if name not in self.op.hvparams:
751
          self.op.hvparams[name] = value
752

    
753
    if einfo.has_section(constants.INISECT_BEP):
754
      # use the parameters, without overriding
755
      for name, value in einfo.items(constants.INISECT_BEP):
756
        if name not in self.op.beparams:
757
          self.op.beparams[name] = value
758
        # Compatibility for the old "memory" be param
759
        if name == constants.BE_MEMORY:
760
          if constants.BE_MAXMEM not in self.op.beparams:
761
            self.op.beparams[constants.BE_MAXMEM] = value
762
          if constants.BE_MINMEM not in self.op.beparams:
763
            self.op.beparams[constants.BE_MINMEM] = value
764
    else:
765
      # try to read the parameters old style, from the main section
766
      for name in constants.BES_PARAMETERS:
767
        if (name not in self.op.beparams and
768
            einfo.has_option(constants.INISECT_INS, name)):
769
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
770

    
771
    if einfo.has_section(constants.INISECT_OSP):
772
      # use the parameters, without overriding
773
      for name, value in einfo.items(constants.INISECT_OSP):
774
        if name not in self.op.osparams:
775
          self.op.osparams[name] = value
776

    
777
  def _RevertToDefaults(self, cluster):
778
    """Revert the instance parameters to the default values.
779

780
    """
781
    # hvparams
782
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
783
    for name in self.op.hvparams.keys():
784
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
785
        del self.op.hvparams[name]
786
    # beparams
787
    be_defs = cluster.SimpleFillBE({})
788
    for name in self.op.beparams.keys():
789
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
790
        del self.op.beparams[name]
791
    # nic params
792
    nic_defs = cluster.SimpleFillNIC({})
793
    for nic in self.op.nics:
794
      for name in constants.NICS_PARAMETERS:
795
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
796
          del nic[name]
797
    # osparams
798
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
799
    for name in self.op.osparams.keys():
800
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
801
        del self.op.osparams[name]
802

    
803
  def _CalculateFileStorageDir(self):
804
    """Calculate final instance file storage dir.
805

806
    """
807
    # file storage dir calculation/check
808
    self.instance_file_storage_dir = None
809
    if self.op.disk_template in constants.DTS_FILEBASED:
810
      # build the full file storage dir path
811
      joinargs = []
812

    
813
      if self.op.disk_template == constants.DT_SHARED_FILE:
814
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
815
      else:
816
        get_fsd_fn = self.cfg.GetFileStorageDir
817

    
818
      cfg_storagedir = get_fsd_fn()
819
      if not cfg_storagedir:
820
        raise errors.OpPrereqError("Cluster file storage dir not defined",
821
                                   errors.ECODE_STATE)
822
      joinargs.append(cfg_storagedir)
823

    
824
      if self.op.file_storage_dir is not None:
825
        joinargs.append(self.op.file_storage_dir)
826

    
827
      joinargs.append(self.op.instance_name)
828

    
829
      # pylint: disable=W0142
830
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
831

    
832
  def CheckPrereq(self): # pylint: disable=R0914
833
    """Check prerequisites.
834

835
    """
836
    self._CalculateFileStorageDir()
837

    
838
    if self.op.mode == constants.INSTANCE_IMPORT:
839
      export_info = self._ReadExportInfo()
840
      self._ReadExportParams(export_info)
841
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
842
    else:
843
      self._old_instance_name = None
844

    
845
    if (not self.cfg.GetVGName() and
846
        self.op.disk_template not in constants.DTS_NOT_LVM):
847
      raise errors.OpPrereqError("Cluster does not support lvm-based"
848
                                 " instances", errors.ECODE_STATE)
849

    
850
    if (self.op.hypervisor is None or
851
        self.op.hypervisor == constants.VALUE_AUTO):
852
      self.op.hypervisor = self.cfg.GetHypervisorType()
853

    
854
    cluster = self.cfg.GetClusterInfo()
855
    enabled_hvs = cluster.enabled_hypervisors
856
    if self.op.hypervisor not in enabled_hvs:
857
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
858
                                 " cluster (%s)" %
859
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
860
                                 errors.ECODE_STATE)
861

    
862
    # Check tag validity
863
    for tag in self.op.tags:
864
      objects.TaggableObject.ValidateTag(tag)
865

    
866
    # check hypervisor parameter syntax (locally)
867
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
868
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
869
                                      self.op.hvparams)
870
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
871
    hv_type.CheckParameterSyntax(filled_hvp)
872
    self.hv_full = filled_hvp
873
    # check that we don't specify global parameters on an instance
874
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
875
                         "instance", "cluster")
876

    
877
    # fill and remember the beparams dict
878
    self.be_full = _ComputeFullBeParams(self.op, cluster)
879

    
880
    # build os parameters
881
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
882

    
883
    # now that hvp/bep are in final format, let's reset to defaults,
884
    # if told to do so
885
    if self.op.identify_defaults:
886
      self._RevertToDefaults(cluster)
887

    
888
    # NIC buildup
889
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
890
                             self.proc.GetECId())
891

    
892
    # disk checks/pre-build
893
    default_vg = self.cfg.GetVGName()
894
    self.disks = ComputeDisks(self.op, default_vg)
895

    
896
    if self.op.mode == constants.INSTANCE_IMPORT:
897
      disk_images = []
898
      for idx in range(len(self.disks)):
899
        option = "disk%d_dump" % idx
900
        if export_info.has_option(constants.INISECT_INS, option):
901
          # FIXME: are the old os-es, disk sizes, etc. useful?
902
          export_name = export_info.get(constants.INISECT_INS, option)
903
          image = utils.PathJoin(self.op.src_path, export_name)
904
          disk_images.append(image)
905
        else:
906
          disk_images.append(False)
907

    
908
      self.src_images = disk_images
909

    
910
      if self.op.instance_name == self._old_instance_name:
911
        for idx, nic in enumerate(self.nics):
912
          if nic.mac == constants.VALUE_AUTO:
913
            nic_mac_ini = "nic%d_mac" % idx
914
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
915

    
916
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
917

    
918
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
919
    if self.op.ip_check:
920
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
921
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
922
                                   (self.check_ip, self.op.instance_name),
923
                                   errors.ECODE_NOTUNIQUE)
924

    
925
    #### mac address generation
926
    # By generating here the mac address both the allocator and the hooks get
927
    # the real final mac address rather than the 'auto' or 'generate' value.
928
    # There is a race condition between the generation and the instance object
929
    # creation, which means that we know the mac is valid now, but we're not
930
    # sure it will be when we actually add the instance. If things go bad
931
    # adding the instance will abort because of a duplicate mac, and the
932
    # creation job will fail.
933
    for nic in self.nics:
934
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
935
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
936

    
937
    #### allocator run
938

    
939
    if self.op.iallocator is not None:
940
      self._RunAllocator()
941

    
942
    # Release all unneeded node locks
943
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
944
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
945
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
946
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
947

    
948
    assert (self.owned_locks(locking.LEVEL_NODE) ==
949
            self.owned_locks(locking.LEVEL_NODE_RES)), \
950
      "Node locks differ from node resource locks"
951

    
952
    #### node related checks
953

    
954
    # check primary node
955
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
956
    assert self.pnode is not None, \
957
      "Cannot retrieve locked node %s" % self.op.pnode
958
    if pnode.offline:
959
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
960
                                 pnode.name, errors.ECODE_STATE)
961
    if pnode.drained:
962
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
963
                                 pnode.name, errors.ECODE_STATE)
964
    if not pnode.vm_capable:
965
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
966
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
967

    
968
    self.secondaries = []
969

    
970
    # Fill in any IPs from IP pools. This must happen here, because we need to
971
    # know the nic's primary node, as specified by the iallocator
972
    for idx, nic in enumerate(self.nics):
973
      net_uuid = nic.network
974
      if net_uuid is not None:
975
        nobj = self.cfg.GetNetwork(net_uuid)
976
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
977
        if netparams is None:
978
          raise errors.OpPrereqError("No netparams found for network"
979
                                     " %s. Propably not connected to"
980
                                     " node's %s nodegroup" %
981
                                     (nobj.name, self.pnode.name),
982
                                     errors.ECODE_INVAL)
983
        self.LogInfo("NIC/%d inherits netparams %s" %
984
                     (idx, netparams.values()))
985
        nic.nicparams = dict(netparams)
986
        if nic.ip is not None:
987
          if nic.ip.lower() == constants.NIC_IP_POOL:
988
            try:
989
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
990
            except errors.ReservationError:
991
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
992
                                         " from the address pool" % idx,
993
                                         errors.ECODE_STATE)
994
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
995
          else:
996
            try:
997
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
998
            except errors.ReservationError:
999
              raise errors.OpPrereqError("IP address %s already in use"
1000
                                         " or does not belong to network %s" %
1001
                                         (nic.ip, nobj.name),
1002
                                         errors.ECODE_NOTUNIQUE)
1003

    
1004
      # net is None, ip None or given
1005
      elif self.op.conflicts_check:
1006
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1007

    
1008
    # mirror node verification
1009
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1010
      if self.op.snode == pnode.name:
1011
        raise errors.OpPrereqError("The secondary node cannot be the"
1012
                                   " primary node", errors.ECODE_INVAL)
1013
      CheckNodeOnline(self, self.op.snode)
1014
      CheckNodeNotDrained(self, self.op.snode)
1015
      CheckNodeVmCapable(self, self.op.snode)
1016
      self.secondaries.append(self.op.snode)
1017

    
1018
      snode = self.cfg.GetNodeInfo(self.op.snode)
1019
      if pnode.group != snode.group:
1020
        self.LogWarning("The primary and secondary nodes are in two"
1021
                        " different node groups; the disk parameters"
1022
                        " from the first disk's node group will be"
1023
                        " used")
1024

    
1025
    nodes = [pnode]
1026
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1027
      nodes.append(snode)
1028
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1029
    excl_stor = compat.any(map(has_es, nodes))
1030
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1031
      raise errors.OpPrereqError("Disk template %s not supported with"
1032
                                 " exclusive storage" % self.op.disk_template,
1033
                                 errors.ECODE_STATE)
1034
    for disk in self.disks:
1035
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1036

    
1037
    nodenames = [pnode.name] + self.secondaries
1038

    
1039
    if not self.adopt_disks:
1040
      if self.op.disk_template == constants.DT_RBD:
1041
        # _CheckRADOSFreeSpace() is just a placeholder.
1042
        # Any function that checks prerequisites can be placed here.
1043
        # Check if there is enough space on the RADOS cluster.
1044
        CheckRADOSFreeSpace()
1045
      elif self.op.disk_template == constants.DT_EXT:
1046
        # FIXME: Function that checks prereqs if needed
1047
        pass
1048
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
1049
        # Check lv size requirements, if not adopting
1050
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1051
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1052
      else:
1053
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1054
        pass
1055

    
1056
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1057
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1058
                                disk[constants.IDISK_ADOPT])
1059
                     for disk in self.disks])
1060
      if len(all_lvs) != len(self.disks):
1061
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1062
                                   errors.ECODE_INVAL)
1063
      for lv_name in all_lvs:
1064
        try:
1065
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1066
          # to ReserveLV uses the same syntax
1067
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1068
        except errors.ReservationError:
1069
          raise errors.OpPrereqError("LV named %s used by another instance" %
1070
                                     lv_name, errors.ECODE_NOTUNIQUE)
1071

    
1072
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1073
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1074

    
1075
      node_lvs = self.rpc.call_lv_list([pnode.name],
1076
                                       vg_names.payload.keys())[pnode.name]
1077
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1078
      node_lvs = node_lvs.payload
1079

    
1080
      delta = all_lvs.difference(node_lvs.keys())
1081
      if delta:
1082
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1083
                                   utils.CommaJoin(delta),
1084
                                   errors.ECODE_INVAL)
1085
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1086
      if online_lvs:
1087
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1088
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1089
                                   errors.ECODE_STATE)
1090
      # update the size of disk based on what is found
1091
      for dsk in self.disks:
1092
        dsk[constants.IDISK_SIZE] = \
1093
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1094
                                        dsk[constants.IDISK_ADOPT])][0]))
1095

    
1096
    elif self.op.disk_template == constants.DT_BLOCK:
1097
      # Normalize and de-duplicate device paths
1098
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1099
                       for disk in self.disks])
1100
      if len(all_disks) != len(self.disks):
1101
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1102
                                   errors.ECODE_INVAL)
1103
      baddisks = [d for d in all_disks
1104
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1105
      if baddisks:
1106
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1107
                                   " cannot be adopted" %
1108
                                   (utils.CommaJoin(baddisks),
1109
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1110
                                   errors.ECODE_INVAL)
1111

    
1112
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
1113
                                            list(all_disks))[pnode.name]
1114
      node_disks.Raise("Cannot get block device information from node %s" %
1115
                       pnode.name)
1116
      node_disks = node_disks.payload
1117
      delta = all_disks.difference(node_disks.keys())
1118
      if delta:
1119
        raise errors.OpPrereqError("Missing block device(s): %s" %
1120
                                   utils.CommaJoin(delta),
1121
                                   errors.ECODE_INVAL)
1122
      for dsk in self.disks:
1123
        dsk[constants.IDISK_SIZE] = \
1124
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1125

    
1126
    # Verify instance specs
1127
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1128
    ispec = {
1129
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1130
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1131
      constants.ISPEC_DISK_COUNT: len(self.disks),
1132
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1133
                                  for disk in self.disks],
1134
      constants.ISPEC_NIC_COUNT: len(self.nics),
1135
      constants.ISPEC_SPINDLE_USE: spindle_use,
1136
      }
1137

    
1138
    group_info = self.cfg.GetNodeGroup(pnode.group)
1139
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1140
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1141
                                               self.op.disk_template)
1142
    if not self.op.ignore_ipolicy and res:
1143
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1144
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1145
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1146

    
1147
    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1148

    
1149
    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1150
    # check OS parameters (remotely)
1151
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1152

    
1153
    CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1154

    
1155
    #TODO: _CheckExtParams (remotely)
1156
    # Check parameters for extstorage
1157

    
1158
    # memory check on primary node
1159
    #TODO(dynmem): use MINMEM for checking
1160
    if self.op.start:
1161
      CheckNodeFreeMemory(self, self.pnode.name,
1162
                          "creating instance %s" % self.op.instance_name,
1163
                          self.be_full[constants.BE_MAXMEM],
1164
                          self.op.hypervisor)
1165

    
1166
    self.dry_run_result = list(nodenames)
1167

    
1168
  def Exec(self, feedback_fn):
1169
    """Create and add the instance to the cluster.
1170

1171
    """
1172
    instance = self.op.instance_name
1173
    pnode_name = self.pnode.name
1174

    
1175
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1176
                self.owned_locks(locking.LEVEL_NODE)), \
1177
      "Node locks differ from node resource locks"
1178
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1179

    
1180
    ht_kind = self.op.hypervisor
1181
    if ht_kind in constants.HTS_REQ_PORT:
1182
      network_port = self.cfg.AllocatePort()
1183
    else:
1184
      network_port = None
1185

    
1186
    # This is ugly but we got a chicken-egg problem here
1187
    # We can only take the group disk parameters, as the instance
1188
    # has no disks yet (we are generating them right here).
1189
    node = self.cfg.GetNodeInfo(pnode_name)
1190
    nodegroup = self.cfg.GetNodeGroup(node.group)
1191
    disks = GenerateDiskTemplate(self,
1192
                                 self.op.disk_template,
1193
                                 instance, pnode_name,
1194
                                 self.secondaries,
1195
                                 self.disks,
1196
                                 self.instance_file_storage_dir,
1197
                                 self.op.file_driver,
1198
                                 0,
1199
                                 feedback_fn,
1200
                                 self.cfg.GetGroupDiskParams(nodegroup))
1201

    
1202
    iobj = objects.Instance(name=instance, os=self.op.os_type,
1203
                            primary_node=pnode_name,
1204
                            nics=self.nics, disks=disks,
1205
                            disk_template=self.op.disk_template,
1206
                            disks_active=False,
1207
                            admin_state=constants.ADMINST_DOWN,
1208
                            network_port=network_port,
1209
                            beparams=self.op.beparams,
1210
                            hvparams=self.op.hvparams,
1211
                            hypervisor=self.op.hypervisor,
1212
                            osparams=self.op.osparams,
1213
                            )
1214

    
1215
    if self.op.tags:
1216
      for tag in self.op.tags:
1217
        iobj.AddTag(tag)
1218

    
1219
    if self.adopt_disks:
1220
      if self.op.disk_template == constants.DT_PLAIN:
1221
        # rename LVs to the newly-generated names; we need to construct
1222
        # 'fake' LV disks with the old data, plus the new unique_id
1223
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1224
        rename_to = []
1225
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1226
          rename_to.append(t_dsk.logical_id)
1227
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1228
          self.cfg.SetDiskID(t_dsk, pnode_name)
1229
        result = self.rpc.call_blockdev_rename(pnode_name,
1230
                                               zip(tmp_disks, rename_to))
1231
        result.Raise("Failed to rename adoped LVs")
1232
    else:
1233
      feedback_fn("* creating instance disks...")
1234
      try:
1235
        CreateDisks(self, iobj)
1236
      except errors.OpExecError:
1237
        self.LogWarning("Device creation failed")
1238
        self.cfg.ReleaseDRBDMinors(instance)
1239
        raise
1240

    
1241
    feedback_fn("adding instance %s to cluster config" % instance)
1242

    
1243
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1244

    
1245
    # Declare that we don't want to remove the instance lock anymore, as we've
1246
    # added the instance to the config
1247
    del self.remove_locks[locking.LEVEL_INSTANCE]
1248

    
1249
    if self.op.mode == constants.INSTANCE_IMPORT:
1250
      # Release unused nodes
1251
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1252
    else:
1253
      # Release all nodes
1254
      ReleaseLocks(self, locking.LEVEL_NODE)
1255

    
1256
    disk_abort = False
1257
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1258
      feedback_fn("* wiping instance disks...")
1259
      try:
1260
        WipeDisks(self, iobj)
1261
      except errors.OpExecError, err:
1262
        logging.exception("Wiping disks failed")
1263
        self.LogWarning("Wiping instance disks failed (%s)", err)
1264
        disk_abort = True
1265

    
1266
    if disk_abort:
1267
      # Something is already wrong with the disks, don't do anything else
1268
      pass
1269
    elif self.op.wait_for_sync:
1270
      disk_abort = not WaitForSync(self, iobj)
1271
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1272
      # make sure the disks are not degraded (still sync-ing is ok)
1273
      feedback_fn("* checking mirrors status")
1274
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1275
    else:
1276
      disk_abort = False
1277

    
1278
    if disk_abort:
1279
      RemoveDisks(self, iobj)
1280
      self.cfg.RemoveInstance(iobj.name)
1281
      # Make sure the instance lock gets removed
1282
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1283
      raise errors.OpExecError("There are some degraded disks for"
1284
                               " this instance")
1285

    
1286
    # instance disks are now active
1287
    iobj.disks_active = True
1288

    
1289
    # Release all node resource locks
1290
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1291

    
1292
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, pnode_name)
1298
      if self.op.mode == constants.INSTANCE_CREATE:
1299
        if not self.op.no_install:
1300
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1301
                        not self.op.wait_for_sync)
1302
          if pause_sync:
1303
            feedback_fn("* pausing disk sync to install instance OS")
1304
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1305
                                                              (iobj.disks,
1306
                                                               iobj), True)
1307
            for idx, success in enumerate(result.payload):
1308
              if not success:
1309
                logging.warn("pause-sync of instance %s for disk %d failed",
1310
                             instance, idx)
1311

    
1312
          feedback_fn("* running the instance OS create scripts...")
1313
          # FIXME: pass debug option from opcode to backend
1314
          os_add_result = \
1315
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1316
                                          self.op.debug_level)
1317
          if pause_sync:
1318
            feedback_fn("* resuming disk sync")
1319
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1320
                                                              (iobj.disks,
1321
                                                               iobj), False)
1322
            for idx, success in enumerate(result.payload):
1323
              if not success:
1324
                logging.warn("resume-sync of instance %s for disk %d failed",
1325
                             instance, idx)
1326

    
1327
          os_add_result.Raise("Could not add os for instance %s"
1328
                              " on node %s" % (instance, pnode_name))
1329

    
1330
      else:
1331
        if self.op.mode == constants.INSTANCE_IMPORT:
1332
          feedback_fn("* running the instance OS import scripts...")
1333

    
1334
          transfers = []
1335

    
1336
          for idx, image in enumerate(self.src_images):
1337
            if not image:
1338
              continue
1339

    
1340
            # FIXME: pass debug option from opcode to backend
1341
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1342
                                               constants.IEIO_FILE, (image, ),
1343
                                               constants.IEIO_SCRIPT,
1344
                                               (iobj.disks[idx], idx),
1345
                                               None)
1346
            transfers.append(dt)
1347

    
1348
          import_result = \
1349
            masterd.instance.TransferInstanceData(self, feedback_fn,
1350
                                                  self.op.src_node, pnode_name,
1351
                                                  self.pnode.secondary_ip,
1352
                                                  iobj, transfers)
1353
          if not compat.all(import_result):
1354
            self.LogWarning("Some disks for instance %s on node %s were not"
1355
                            " imported successfully" % (instance, pnode_name))
1356

    
1357
          rename_from = self._old_instance_name
1358

    
1359
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1360
          feedback_fn("* preparing remote import...")
1361
          # The source cluster will stop the instance before attempting to make
1362
          # a connection. In some cases stopping an instance can take a long
1363
          # time, hence the shutdown timeout is added to the connection
1364
          # timeout.
1365
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1366
                             self.op.source_shutdown_timeout)
1367
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1368

    
1369
          assert iobj.primary_node == self.pnode.name
1370
          disk_results = \
1371
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1372
                                          self.source_x509_ca,
1373
                                          self._cds, timeouts)
1374
          if not compat.all(disk_results):
1375
            # TODO: Should the instance still be started, even if some disks
1376
            # failed to import (valid for local imports, too)?
1377
            self.LogWarning("Some disks for instance %s on node %s were not"
1378
                            " imported successfully" % (instance, pnode_name))
1379

    
1380
          rename_from = self.source_instance_name
1381

    
1382
        else:
1383
          # also checked in the prereq part
1384
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1385
                                       % self.op.mode)
1386

    
1387
        # Run rename script on newly imported instance
1388
        assert iobj.name == instance
1389
        feedback_fn("Running rename script for %s" % instance)
1390
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1391
                                                   rename_from,
1392
                                                   self.op.debug_level)
1393
        result.Warn("Failed to run rename script for %s on node %s" %
1394
                    (instance, pnode_name), self.LogWarning)
1395

    
1396
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1397

    
1398
    if self.op.start:
1399
      iobj.admin_state = constants.ADMINST_UP
1400
      self.cfg.Update(iobj, feedback_fn)
1401
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1402
      feedback_fn("* starting instance...")
1403
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1404
                                            False, self.op.reason)
1405
      result.Raise("Could not start instance")
1406

    
1407
    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
1411
  """Rename an instance.
1412

1413
  """
1414
  HPATH = "instance-rename"
1415
  HTYPE = constants.HTYPE_INSTANCE
1416

    
1417
  def CheckArguments(self):
1418
    """Check arguments.
1419

1420
    """
1421
    if self.op.ip_check and not self.op.name_check:
1422
      # TODO: make the ip check more flexible and not depend on the name check
1423
      raise errors.OpPrereqError("IP address check requires a name check",
1424
                                 errors.ECODE_INVAL)
1425

    
1426
  def BuildHooksEnv(self):
1427
    """Build hooks env.
1428

1429
    This runs on master, primary and secondary nodes of the instance.
1430

1431
    """
1432
    env = BuildInstanceHookEnvByObject(self, self.instance)
1433
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1434
    return env
1435

    
1436
  def BuildHooksNodes(self):
1437
    """Build hooks nodes.
1438

1439
    """
1440
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1441
    return (nl, nl)
1442

    
1443
  def CheckPrereq(self):
1444
    """Check prerequisites.
1445

1446
    This checks that the instance is in the cluster and is not running.
1447

1448
    """
1449
    self.op.instance_name = ExpandInstanceName(self.cfg,
1450
                                               self.op.instance_name)
1451
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1452
    assert instance is not None
1453
    CheckNodeOnline(self, instance.primary_node)
1454
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1455
                       msg="cannot rename")
1456
    self.instance = instance
1457

    
1458
    new_name = self.op.new_name
1459
    if self.op.name_check:
1460
      hostname = _CheckHostnameSane(self, new_name)
1461
      new_name = self.op.new_name = hostname.name
1462
      if (self.op.ip_check and
1463
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1464
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1465
                                   (hostname.ip, new_name),
1466
                                   errors.ECODE_NOTUNIQUE)
1467

    
1468
    instance_list = self.cfg.GetInstanceList()
1469
    if new_name in instance_list and new_name != instance.name:
1470
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1471
                                 new_name, errors.ECODE_EXISTS)
1472

    
1473
  def Exec(self, feedback_fn):
1474
    """Rename the instance.
1475

1476
    """
1477
    inst = self.instance
1478
    old_name = inst.name
1479

    
1480
    rename_file_storage = False
1481
    if (inst.disk_template in constants.DTS_FILEBASED and
1482
        self.op.new_name != inst.name):
1483
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1484
      rename_file_storage = True
1485

    
1486
    self.cfg.RenameInstance(inst.name, self.op.new_name)
1487
    # Change the instance lock. This is definitely safe while we hold the BGL.
1488
    # Otherwise the new lock would have to be added in acquired mode.
1489
    assert self.REQ_BGL
1490
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1491
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1492
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1493

    
1494
    # re-read the instance from the configuration after rename
1495
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
1496

    
1497
    if rename_file_storage:
1498
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1499
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1500
                                                     old_file_storage_dir,
1501
                                                     new_file_storage_dir)
1502
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1503
                   " (but the instance has been renamed in Ganeti)" %
1504
                   (inst.primary_node, old_file_storage_dir,
1505
                    new_file_storage_dir))
1506

    
1507
    StartInstanceDisks(self, inst, None)
1508
    # update info on disks
1509
    info = GetInstanceInfoText(inst)
1510
    for (idx, disk) in enumerate(inst.disks):
1511
      for node in inst.all_nodes:
1512
        self.cfg.SetDiskID(disk, node)
1513
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
1514
        result.Warn("Error setting info on node %s for disk %s" % (node, idx),
1515
                    self.LogWarning)
1516
    try:
1517
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1518
                                                 old_name, self.op.debug_level)
1519
      result.Warn("Could not run OS rename script for instance %s on node %s"
1520
                  " (but the instance has been renamed in Ganeti)" %
1521
                  (inst.name, inst.primary_node), self.LogWarning)
1522
    finally:
1523
      ShutdownInstanceDisks(self, inst)
1524

    
1525
    return inst.name


class LUInstanceRemove(LogicalUnit):
1529
  """Remove an instance.
1530

1531
  """
1532
  HPATH = "instance-remove"
1533
  HTYPE = constants.HTYPE_INSTANCE
1534
  REQ_BGL = False
1535

    
1536
  def ExpandNames(self):
1537
    self._ExpandAndLockInstance()
1538
    self.needed_locks[locking.LEVEL_NODE] = []
1539
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1540
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1541

    
1542
  def DeclareLocks(self, level):
1543
    if level == locking.LEVEL_NODE:
1544
      self._LockInstancesNodes()
1545
    elif level == locking.LEVEL_NODE_RES:
1546
      # Copy node locks
1547
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1548
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1549

    
1550
  def BuildHooksEnv(self):
1551
    """Build hooks env.
1552

1553
    This runs on master, primary and secondary nodes of the instance.
1554

1555
    """
1556
    env = BuildInstanceHookEnvByObject(self, self.instance)
1557
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1558
    return env
1559

    
1560
  def BuildHooksNodes(self):
1561
    """Build hooks nodes.
1562

1563
    """
1564
    nl = [self.cfg.GetMasterNode()]
1565
    nl_post = list(self.instance.all_nodes) + nl
1566
    return (nl, nl_post)
1567

    
1568
  def CheckPrereq(self):
1569
    """Check prerequisites.
1570

1571
    This checks that the instance is in the cluster.
1572

1573
    """
1574
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1575
    assert self.instance is not None, \
1576
      "Cannot retrieve locked instance %s" % self.op.instance_name
1577

    
1578
  def Exec(self, feedback_fn):
1579
    """Remove the instance.
1580

1581
    """
1582
    instance = self.instance
1583
    logging.info("Shutting down instance %s on node %s",
1584
                 instance.name, instance.primary_node)
1585

    
1586
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1587
                                             self.op.shutdown_timeout,
1588
                                             self.op.reason)
1589
    if self.op.ignore_failures:
1590
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1591
    else:
1592
      result.Raise("Could not shutdown instance %s on node %s" %
1593
                   (instance.name, instance.primary_node))
1594

    
1595
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1596
            self.owned_locks(locking.LEVEL_NODE_RES))
1597
    assert not (set(instance.all_nodes) -
1598
                self.owned_locks(locking.LEVEL_NODE)), \
1599
      "Not owning correct locks"
1600

    
1601
    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1602

    
1603

    
1604
class LUInstanceMove(LogicalUnit):
1605
  """Move an instance by data-copying.
1606

1607
  """
1608
  HPATH = "instance-move"
1609
  HTYPE = constants.HTYPE_INSTANCE
1610
  REQ_BGL = False
1611

    
1612
  def ExpandNames(self):
1613
    self._ExpandAndLockInstance()
1614
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
1615
    self.op.target_node = target_node
1616
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
1617
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1618
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1619

    
1620
  def DeclareLocks(self, level):
1621
    if level == locking.LEVEL_NODE:
1622
      self._LockInstancesNodes(primary_only=True)
1623
    elif level == locking.LEVEL_NODE_RES:
1624
      # Copy node locks
1625
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1626
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1627

    
1628
  def BuildHooksEnv(self):
1629
    """Build hooks env.
1630

1631
    This runs on master, primary and secondary nodes of the instance.
1632

1633
    """
1634
    env = {
1635
      "TARGET_NODE": self.op.target_node,
1636
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1637
      }
1638
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1639
    return env
1640

    
1641
  def BuildHooksNodes(self):
1642
    """Build hooks nodes.
1643

1644
    """
1645
    nl = [
1646
      self.cfg.GetMasterNode(),
1647
      self.instance.primary_node,
1648
      self.op.target_node,
1649
      ]
1650
    return (nl, nl)
1651

    
1652
  def CheckPrereq(self):
1653
    """Check prerequisites.
1654

1655
    This checks that the instance is in the cluster.
1656

1657
    """
1658
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1659
    assert self.instance is not None, \
1660
      "Cannot retrieve locked instance %s" % self.op.instance_name
1661

    
1662
    if instance.disk_template not in constants.DTS_COPYABLE:
1663
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1664
                                 instance.disk_template, errors.ECODE_STATE)
1665

    
1666
    node = self.cfg.GetNodeInfo(self.op.target_node)
1667
    assert node is not None, \
1668
      "Cannot retrieve locked node %s" % self.op.target_node
1669

    
1670
    self.target_node = target_node = node.name
1671

    
1672
    if target_node == instance.primary_node:
1673
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1674
                                 (instance.name, target_node),
1675
                                 errors.ECODE_STATE)
1676

    
1677
    bep = self.cfg.GetClusterInfo().FillBE(instance)
1678

    
1679
    for idx, dsk in enumerate(instance.disks):
1680
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1681
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1682
                                   " cannot copy" % idx, errors.ECODE_STATE)
1683

    
1684
    CheckNodeOnline(self, target_node)
1685
    CheckNodeNotDrained(self, target_node)
1686
    CheckNodeVmCapable(self, target_node)
1687
    cluster = self.cfg.GetClusterInfo()
1688
    group_info = self.cfg.GetNodeGroup(node.group)
1689
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1690
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1691
                           ignore=self.op.ignore_ipolicy)
1692

    
1693
    if instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(self, target_node,
                          "failing over instance %s" %
                          instance.name, bep[constants.BE_MAXMEM],
                          instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the target node as"
                   " instance will not be started")
1702

    
1703
    # check bridge existence
1704
    CheckInstanceBridgesExist(self, instance, node=target_node)
1705

    
1706
  def Exec(self, feedback_fn):
1707
    """Move an instance.
1708

1709
    The move is done by shutting it down on its present node, copying
1710
    the data over (slow) and starting it on the new node.
1711

1712
    """
1713
    instance = self.instance
1714

    
1715
    source_node = instance.primary_node
1716
    target_node = self.target_node
1717

    
1718
    self.LogInfo("Shutting down instance %s on source node %s",
1719
                 instance.name, source_node)
1720

    
1721
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1722
            self.owned_locks(locking.LEVEL_NODE_RES))
1723

    
1724
    result = self.rpc.call_instance_shutdown(source_node, instance,
1725
                                             self.op.shutdown_timeout,
1726
                                             self.op.reason)
1727
    if self.op.ignore_consistency:
1728
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1729
                  " anyway. Please make sure node %s is down. Error details" %
1730
                  (instance.name, source_node, source_node), self.LogWarning)
1731
    else:
1732
      result.Raise("Could not shutdown instance %s on node %s" %
1733
                   (instance.name, source_node))
1734

    
1735
    # create the target disks
1736
    try:
1737
      CreateDisks(self, instance, target_node=target_node)
1738
    except errors.OpExecError:
1739
      self.LogWarning("Device creation failed")
1740
      self.cfg.ReleaseDRBDMinors(instance.name)
1741
      raise
1742

    
1743
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1744

    
1745
    errs = []
1746
    # activate, get path, copy the data over
1747
    for idx, disk in enumerate(instance.disks):
1748
      self.LogInfo("Copying data for disk %d", idx)
1749
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1750
                                               instance.name, True, idx)
1751
      if result.fail_msg:
1752
        self.LogWarning("Can't assemble newly created disk %d: %s",
1753
                        idx, result.fail_msg)
1754
        errs.append(result.fail_msg)
1755
        break
1756
      dev_path = result.payload
1757
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1758
                                             target_node, dev_path,
1759
                                             cluster_name)
1760
      if result.fail_msg:
1761
        self.LogWarning("Can't copy data over for disk %d: %s",
1762
                        idx, result.fail_msg)
1763
        errs.append(result.fail_msg)
1764
        break
1765

    
1766
    if errs:
1767
      self.LogWarning("Some disks failed to copy, aborting")
1768
      try:
1769
        RemoveDisks(self, instance, target_node=target_node)
1770
      finally:
1771
        self.cfg.ReleaseDRBDMinors(instance.name)
1772
        raise errors.OpExecError("Errors during disk copy: %s" %
1773
                                 (",".join(errs),))
1774

    
1775
    instance.primary_node = target_node
1776
    self.cfg.Update(instance, feedback_fn)
1777

    
1778
    self.LogInfo("Removing the disks on the original node")
1779
    RemoveDisks(self, instance, target_node=source_node)
1780

    
1781
    # Only start the instance if it's marked as up
1782
    if instance.admin_state == constants.ADMINST_UP:
1783
      self.LogInfo("Starting instance %s on node %s",
1784
                   instance.name, target_node)
1785

    
1786
      disks_ok, _ = AssembleInstanceDisks(self, instance,
1787
                                          ignore_secondaries=True)
1788
      if not disks_ok:
1789
        ShutdownInstanceDisks(self, instance)
1790
        raise errors.OpExecError("Can't activate the instance's disks")
1791

    
1792
      result = self.rpc.call_instance_start(target_node,
1793
                                            (instance, None, None), False,
1794
                                            self.op.reason)
1795
      msg = result.fail_msg
1796
      if msg:
1797
        ShutdownInstanceDisks(self, instance)
1798
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1799
                                 (instance.name, target_node, msg))
1800

    
1801

    
1802
class LUInstanceMultiAlloc(NoHooksLU):
1803
  """Allocates multiple instances at the same time.
1804

1805
  """
1806
  REQ_BGL = False
1807

    
1808
  def CheckArguments(self):
1809
    """Check arguments.
1810

1811
    """
1812
    nodes = []
1813
    for inst in self.op.instances:
1814
      if inst.iallocator is not None:
1815
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1816
                                   " instance objects", errors.ECODE_INVAL)
1817
      nodes.append(bool(inst.pnode))
1818
      if inst.disk_template in constants.DTS_INT_MIRROR:
1819
        nodes.append(bool(inst.snode))
1820

    
1821
    has_nodes = compat.any(nodes)
1822
    if compat.all(nodes) ^ has_nodes:
1823
      raise errors.OpPrereqError("There are instance objects providing"
1824
                                 " pnode/snode while others do not",
1825
                                 errors.ECODE_INVAL)
1826

    
1827
    if self.op.iallocator is None:
1828
      default_iallocator = self.cfg.GetDefaultIAllocator()
1829
      if default_iallocator and has_nodes:
1830
        self.op.iallocator = default_iallocator
1831
      else:
1832
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1833
                                   " given and no cluster-wide default"
1834
                                   " iallocator found; please specify either"
1835
                                   " an iallocator or nodes on the instances"
1836
                                   " or set a cluster-wide default iallocator",
1837
                                   errors.ECODE_INVAL)
1838

    
1839
    _CheckOpportunisticLocking(self.op)
1840

    
1841
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1842
    if dups:
1843
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1844
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1845

    
1846
  def ExpandNames(self):
1847
    """Calculate the locks.
1848

1849
    """
1850
    self.share_locks = ShareAll()
1851
    self.needed_locks = {
1852
      # iallocator will select nodes and even if no iallocator is used,
1853
      # collisions with LUInstanceCreate should be avoided
1854
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1855
      }
1856

    
1857
    if self.op.iallocator:
1858
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1859
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1860

    
1861
      if self.op.opportunistic_locking:
1862
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1863
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1864
    else:
1865
      nodeslist = []
1866
      for inst in self.op.instances:
1867
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1868
        nodeslist.append(inst.pnode)
1869
        if inst.snode is not None:
1870
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
1871
          nodeslist.append(inst.snode)
1872

    
1873
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1874
      # Lock resources of instance's primary and secondary nodes (copy to
1875
      # prevent accidental modification)
1876
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1877

    
1878
  def CheckPrereq(self):
1879
    """Check prerequisite.
1880

1881
    """
1882
    cluster = self.cfg.GetClusterInfo()
1883
    default_vg = self.cfg.GetVGName()
1884
    ec_id = self.proc.GetECId()
1885

    
1886
    if self.op.opportunistic_locking:
1887
      # Only consider nodes for which a lock is held
1888
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1889
    else:
1890
      node_whitelist = None
1891

    
1892
    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1893
                                         _ComputeNics(op, cluster, None,
1894
                                                      self.cfg, ec_id),
1895
                                         _ComputeFullBeParams(op, cluster),
1896
                                         node_whitelist)
1897
             for op in self.op.instances]
1898

    
1899
    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1900
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1901

    
1902
    ial.Run(self.op.iallocator)
1903

    
1904
    if not ial.success:
1905
      raise errors.OpPrereqError("Can't compute nodes using"
1906
                                 " iallocator '%s': %s" %
1907
                                 (self.op.iallocator, ial.info),
1908
                                 errors.ECODE_NORES)
1909

    
1910
    self.ia_result = ial.result
1911

    
1912
    if self.op.dry_run:
1913
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1914
        constants.JOB_IDS_KEY: [],
1915
        })
1916

    
1917
  def _ConstructPartialResult(self):
1918
    """Contructs the partial result.
1919

1920
    """
1921
    (allocatable, failed) = self.ia_result
1922
    return {
1923
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1924
        map(compat.fst, allocatable),
1925
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1926
      }
1927

    
1928
  def Exec(self, feedback_fn):
1929
    """Executes the opcode.
1930

1931
    """
1932
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
1933
    (allocatable, failed) = self.ia_result
1934

    
1935
    jobs = []
1936
    for (name, nodes) in allocatable:
1937
      op = op2inst.pop(name)
1938

    
1939
      if len(nodes) > 1:
1940
        (op.pnode, op.snode) = nodes
1941
      else:
1942
        (op.pnode,) = nodes
1943

    
1944
      jobs.append([op])
1945

    
1946
    missing = set(op2inst.keys()) - set(failed)
1947
    assert not missing, \
1948
      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
1949

    
1950
    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
1954
  """Data structure for network interface modifications.
1955

1956
  Used by L{LUInstanceSetParams}.
1957

1958
  """
1959
  def __init__(self):
1960
    self.params = None
1961
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
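
# Illustrative sketch, not part of the original module: callers pass
# (operation, index, parameters) tuples and receive 4-tuples carrying one
# private object per modification, roughly:
#   mods = [(constants.DDM_ADD, -1, {constants.INIC_MAC: constants.VALUE_AUTO})]
#   prepared = _PrepareContainerMods(mods, _InstNicModPrivate)
#   # -> [(constants.DDM_ADD, -1, {...}, <_InstNicModPrivate instance>)]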


def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
1984
  """Checks if nodes have enough physical CPUs
1985

1986
  This function checks if all given nodes have the needed number of
1987
  physical CPUs. In case any node has less CPUs or we cannot get the
1988
  information from the node, this function raises an OpPrereqError
1989
  exception.
1990

1991
  @type lu: C{LogicalUnit}
1992
  @param lu: a logical unit from which we get configuration data
1993
  @type nodenames: C{list}
1994
  @param nodenames: the list of node names to check
1995
  @type requested: C{int}
1996
  @param requested: the minimum acceptable number of physical CPUs
1997
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
1998
      or we cannot check the node
1999

2000
  """
2001
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2002
  for node in nodenames:
2003
    info = nodeinfo[node]
2004
    info.Raise("Cannot get current information from node %s" % node,
2005
               prereq=True, ecode=errors.ECODE_ENVIRON)
2006
    (_, _, (hv_info, )) = info.payload
2007
    num_cpus = hv_info.get("cpu_total", None)
2008
    if not isinstance(num_cpus, int):
2009
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2010
                                 " on node %s, result was '%s'" %
2011
                                 (node, num_cpus), errors.ECODE_ENVIRON)
2012
    if requested > num_cpus:
2013
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2014
                                 "required" % (node, num_cpus, requested),
2015
                                 errors.ECODE_NORES)
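
# Illustrative sketch (hypothetical arguments): require at least 4 physical
# CPUs on the instance's primary node for its hypervisor, e.g.:
#   _CheckNodesPhysicalCPUs(self, [instance.primary_node], 4,
#                           instance.hypervisor)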


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
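
# Illustrative sketch: the identifier may be a numeric index ("0", or "-1" for
# the last item) or an item name/UUID, e.g. (hypothetical container contents):
#   (idx, nic) = GetItemFromContainer("-1", "nic", instance.nics)
#   (idx, nic) = GetItemFromContainer("eth0", "nic", instance.nics)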


def _ApplyContainerMods(kind, container, chgdesc, mods,
2055
                        create_fn, modify_fn, remove_fn):
2056
  """Applies descriptions in C{mods} to C{container}.
2057

2058
  @type kind: string
2059
  @param kind: One-word item description
2060
  @type container: list
2061
  @param container: Container to modify
2062
  @type chgdesc: None or list
2063
  @param chgdesc: List of applied changes
2064
  @type mods: list
2065
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2066
  @type create_fn: callable
2067
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2068
    receives absolute item index, parameters and private data object as added
2069
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2070
    as list
2071
  @type modify_fn: callable
2072
  @param modify_fn: Callback for modifying an existing item
2073
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2074
    and private data object as added by L{_PrepareContainerMods}, returns
2075
    changes as list
2076
  @type remove_fn: callable
2077
  @param remove_fn: Callback on removing item; receives absolute item index,
2078
    item and private data object as added by L{_PrepareContainerMods}
2079

2080
  """
2081
  for (op, identifier, params, private) in mods:
2082
    changes = None
2083

    
2084
    if op == constants.DDM_ADD:
2085
      # Calculate where item will be added
2086
      # When adding an item, identifier can only be an index
2087
      try:
2088
        idx = int(identifier)
2089
      except ValueError:
2090
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2091
                                   " identifier for %s" % constants.DDM_ADD,
2092
                                   errors.ECODE_INVAL)
2093
      if idx == -1:
2094
        addidx = len(container)
2095
      else:
2096
        if idx < 0:
2097
          raise IndexError("Not accepting negative indices other than -1")
2098
        elif idx > len(container):
2099
          raise IndexError("Got %s index %s, but there are only %s" %
2100
                           (kind, idx, len(container)))
2101
        addidx = idx
2102

    
2103
      if create_fn is None:
2104
        item = params
2105
      else:
2106
        (item, changes) = create_fn(addidx, params, private)
2107

    
2108
      if idx == -1:
2109
        container.append(item)
2110
      else:
2111
        assert idx >= 0
2112
        assert idx <= len(container)
2113
        # list.insert does so before the specified index
2114
        container.insert(idx, item)
2115
    else:
2116
      # Retrieve existing item
2117
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2118

    
2119
      if op == constants.DDM_REMOVE:
2120
        assert not params
2121

    
2122
        if remove_fn is not None:
2123
          remove_fn(absidx, item, private)
2124

    
2125
        changes = [("%s/%s" % (kind, absidx), "remove")]
2126

    
2127
        assert container[absidx] == item
2128
        del container[absidx]
2129
      elif op == constants.DDM_MODIFY:
2130
        if modify_fn is not None:
2131
          changes = modify_fn(absidx, item, params, private)
2132
      else:
2133
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2134

    
2135
    assert _TApplyContModsCbChanges(changes)
2136

    
2137
    if not (chgdesc is None or changes is None):
2138
      chgdesc.extend(changes)
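
# Illustrative sketch, not part of the original module: with all callbacks set
# to None, an "add" modification simply appends its parameters, roughly:
#   container = []
#   chgdesc = []
#   _ApplyContainerMods("test", container, chgdesc,
#                       _PrepareContainerMods([(constants.DDM_ADD, -1, "item")],
#                                             None),
#                       None, None, None)
#   # container == ["item"]; chgdesc stays empty because no create_fn was
#   # given, so no change descriptions were produced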


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


class LUInstanceSetParams(LogicalUnit):
2152
  """Modifies an instances's parameters.
2153

2154
  """
2155
  HPATH = "instance-modify"
2156
  HTYPE = constants.HTYPE_INSTANCE
2157
  REQ_BGL = False
2158

    
2159
  @staticmethod
2160
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2161
    assert ht.TList(mods)
2162
    assert not mods or len(mods[0]) in (2, 3)
2163

    
2164
    if mods and len(mods[0]) == 2:
2165
      result = []
2166

    
2167
      addremove = 0
2168
      for op, params in mods:
2169
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2170
          result.append((op, -1, params))
2171
          addremove += 1
2172

    
2173
          if addremove > 1:
2174
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2175
                                       " supported at a time" % kind,
2176
                                       errors.ECODE_INVAL)
2177
        else:
2178
          result.append((constants.DDM_MODIFY, op, params))
2179

    
2180
      assert verify_fn(result)
2181
    else:
2182
      result = mods
2183

    
2184
    return result
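
  # Illustrative sketch: legacy 2-tuple modifications are upgraded to the
  # (operation, identifier, parameters) form, roughly:
  #   [(constants.DDM_ADD, {"size": 1024})] -> [(constants.DDM_ADD, -1, {...})]
  #   [(0, {"mode": "rw"})] -> [(constants.DDM_MODIFY, 0, {"mode": "rw"})]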
  @staticmethod
2187
  def _CheckMods(kind, mods, key_types, item_fn):
2188
    """Ensures requested disk/NIC modifications are valid.
2189

2190
    """
2191
    for (op, _, params) in mods:
2192
      assert ht.TDict(params)
2193

    
2194
      # If 'key_types' is an empty dict, we assume we have an
2195
      # 'ext' template and thus do not ForceDictType
2196
      if key_types:
2197
        utils.ForceDictType(params, key_types)
2198

    
2199
      if op == constants.DDM_REMOVE:
2200
        if params:
2201
          raise errors.OpPrereqError("No settings should be passed when"
2202
                                     " removing a %s" % kind,
2203
                                     errors.ECODE_INVAL)
2204
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2205
        item_fn(op, params)
2206
      else:
2207
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2208

    
2209
  @staticmethod
2210
  def _VerifyDiskModification(op, params, excl_stor):
2211
    """Verifies a disk modification.
2212

2213
    """
2214
    if op == constants.DDM_ADD:
2215
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2216
      if mode not in constants.DISK_ACCESS_SET:
2217
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2218
                                   errors.ECODE_INVAL)
2219

    
2220
      size = params.get(constants.IDISK_SIZE, None)
2221
      if size is None:
2222
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2223
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2224

    
2225
      try:
2226
        size = int(size)
2227
      except (TypeError, ValueError), err:
2228
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2229
                                   errors.ECODE_INVAL)
2230

    
2231
      params[constants.IDISK_SIZE] = size
2232
      name = params.get(constants.IDISK_NAME, None)
2233
      if name is not None and name.lower() == constants.VALUE_NONE:
2234
        params[constants.IDISK_NAME] = None
2235

    
2236
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2237

    
2238
    elif op == constants.DDM_MODIFY:
2239
      if constants.IDISK_SIZE in params:
2240
        raise errors.OpPrereqError("Disk size change not possible, use"
2241
                                   " grow-disk", errors.ECODE_INVAL)
2242
      if len(params) > 2:
2243
        raise errors.OpPrereqError("Disk modification doesn't support"
2244
                                   " additional arbitrary parameters",
2245
                                   errors.ECODE_INVAL)
2246
      name = params.get(constants.IDISK_NAME, None)
2247
      if name is not None and name.lower() == constants.VALUE_NONE:
2248
        params[constants.IDISK_NAME] = None
2249

    
2250
  @staticmethod
2251
  def _VerifyNicModification(op, params):
2252
    """Verifies a network interface modification.
2253

2254
    """
2255
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2256
      ip = params.get(constants.INIC_IP, None)
2257
      name = params.get(constants.INIC_NAME, None)
2258
      req_net = params.get(constants.INIC_NETWORK, None)
2259
      link = params.get(constants.NIC_LINK, None)
2260
      mode = params.get(constants.NIC_MODE, None)
2261
      if name is not None and name.lower() == constants.VALUE_NONE:
2262
        params[constants.INIC_NAME] = None
2263
      if req_net is not None:
2264
        if req_net.lower() == constants.VALUE_NONE:
2265
          params[constants.INIC_NETWORK] = None
2266
          req_net = None
2267
        elif link is not None or mode is not None:
2268
          raise errors.OpPrereqError("If network is given"
2269
                                     " mode or link should not",
2270
                                     errors.ECODE_INVAL)
2271

    
2272
      if op == constants.DDM_ADD:
2273
        macaddr = params.get(constants.INIC_MAC, None)
2274
        if macaddr is None:
2275
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2276

    
2277
      if ip is not None:
2278
        if ip.lower() == constants.VALUE_NONE:
2279
          params[constants.INIC_IP] = None
2280
        else:
2281
          if ip.lower() == constants.NIC_IP_POOL:
2282
            if op == constants.DDM_ADD and req_net is None:
2283
              raise errors.OpPrereqError("If ip=pool, parameter network"
2284
                                         " cannot be none",
2285
                                         errors.ECODE_INVAL)
2286
          else:
2287
            if not netutils.IPAddress.IsValid(ip):
2288
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2289
                                         errors.ECODE_INVAL)
2290

    
2291
      if constants.INIC_MAC in params:
2292
        macaddr = params[constants.INIC_MAC]
2293
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2294
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2295

    
2296
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2297
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2298
                                     " modifying an existing NIC",
2299
                                     errors.ECODE_INVAL)
2300

    
2301
  def CheckArguments(self):
2302
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2303
            self.op.hvparams or self.op.beparams or self.op.os_name or
2304
            self.op.offline is not None or self.op.runtime_mem or
2305
            self.op.pnode):
2306
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2307

    
2308
    if self.op.hvparams:
2309
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2310
                           "hypervisor", "instance", "cluster")
2311

    
2312
    self.op.disks = self._UpgradeDiskNicMods(
2313
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2314
    self.op.nics = self._UpgradeDiskNicMods(
2315
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2316

    
2317
    if self.op.disks and self.op.disk_template is not None:
2318
      raise errors.OpPrereqError("Disk template conversion and other disk"
2319
                                 " changes not supported at the same time",
2320
                                 errors.ECODE_INVAL)
2321

    
2322
    if (self.op.disk_template and
2323
        self.op.disk_template in constants.DTS_INT_MIRROR and
2324
        self.op.remote_node is None):
2325
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2326
                                 " one requires specifying a secondary node",
2327
                                 errors.ECODE_INVAL)
2328

    
2329
    # Check NIC modifications
2330
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2331
                    self._VerifyNicModification)
2332

    
2333
    if self.op.pnode:
2334
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2335

    
2336
  def ExpandNames(self):
2337
    self._ExpandAndLockInstance()
2338
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2339
    # Can't even acquire node locks in shared mode as upcoming changes in
2340
    # Ganeti 2.6 will start to modify the node object on disk conversion
2341
    self.needed_locks[locking.LEVEL_NODE] = []
2342
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2343
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2344
    # Lock the node group to look up the ipolicy
2345
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2346

    
2347
  def DeclareLocks(self, level):
2348
    if level == locking.LEVEL_NODEGROUP:
2349
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2350
      # Acquire locks for the instance's nodegroups optimistically. Needs
2351
      # to be verified in CheckPrereq
2352
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2353
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2354
    elif level == locking.LEVEL_NODE:
2355
      self._LockInstancesNodes()
2356
      if self.op.disk_template and self.op.remote_node:
2357
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2358
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2359
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2360
      # Copy node locks
2361
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2362
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2363

    
2364
  def BuildHooksEnv(self):
2365
    """Build hooks env.
2366

2367
    This runs on the master, primary and secondaries.
2368

2369
    """
2370
    args = {}
2371
    if constants.BE_MINMEM in self.be_new:
2372
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2373
    if constants.BE_MAXMEM in self.be_new:
2374
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2375
    if constants.BE_VCPUS in self.be_new:
2376
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2377
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2378
    # information at all.
2379

    
2380
    if self._new_nics is not None:
2381
      nics = []
2382

    
2383
      for nic in self._new_nics:
2384
        n = copy.deepcopy(nic)
2385
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2386
        n.nicparams = nicparams
2387
        nics.append(NICToTuple(self, n))
2388

    
2389
      args["nics"] = nics
2390

    
2391
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2392
    if self.op.disk_template:
2393
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2394
    if self.op.runtime_mem:
2395
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2396

    
2397
    return env
2398

    
2399
  def BuildHooksNodes(self):
2400
    """Build hooks nodes.
2401

2402
    """
2403
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2404
    return (nl, nl)
2405

    
2406
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2407
                              old_params, cluster, pnode):
2408

    
2409
    update_params_dict = dict([(key, params[key])
2410
                               for key in constants.NICS_PARAMETERS
2411
                               if key in params])
2412

    
2413
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2414
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2415

    
2416
    new_net_uuid = None
2417
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2418
    if new_net_uuid_or_name:
2419
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2420
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2421

    
2422
    if old_net_uuid:
2423
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2424

    
2425
    if new_net_uuid:
2426
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2427
      if not netparams:
2428
        raise errors.OpPrereqError("No netparams found for the network"
2429
                                   " %s, probably not connected" %
2430
                                   new_net_obj.name, errors.ECODE_INVAL)
2431
      new_params = dict(netparams)
2432
    else:
2433
      new_params = GetUpdatedParams(old_params, update_params_dict)
2434

    
2435
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2436

    
2437
    new_filled_params = cluster.SimpleFillNIC(new_params)
2438
    objects.NIC.CheckParameterSyntax(new_filled_params)
2439

    
2440
    new_mode = new_filled_params[constants.NIC_MODE]
2441
    if new_mode == constants.NIC_MODE_BRIDGED:
2442
      bridge = new_filled_params[constants.NIC_LINK]
2443
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2444
      if msg:
2445
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2446
        if self.op.force:
2447
          self.warn.append(msg)
2448
        else:
2449
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2450

    
2451
    elif new_mode == constants.NIC_MODE_ROUTED:
2452
      ip = params.get(constants.INIC_IP, old_ip)
2453
      if ip is None:
2454
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2455
                                   " on a routed NIC", errors.ECODE_INVAL)
2456

    
2457
    elif new_mode == constants.NIC_MODE_OVS:
2458
      # TODO: check OVS link
2459
      self.LogInfo("OVS links are currently not checked for correctness")
2460

    
2461
    if constants.INIC_MAC in params:
2462
      mac = params[constants.INIC_MAC]
2463
      if mac is None:
2464
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2465
                                   errors.ECODE_INVAL)
2466
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2467
        # otherwise generate the MAC address
2468
        params[constants.INIC_MAC] = \
2469
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2470
      else:
2471
        # or validate/reserve the current one
2472
        try:
2473
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2474
        except errors.ReservationError:
2475
          raise errors.OpPrereqError("MAC address '%s' already in use"
2476
                                     " in cluster" % mac,
2477
                                     errors.ECODE_NOTUNIQUE)
2478
    elif new_net_uuid != old_net_uuid:
2479

    
2480
      def get_net_prefix(net_uuid):
2481
        mac_prefix = None
2482
        if net_uuid:
2483
          nobj = self.cfg.GetNetwork(net_uuid)
2484
          mac_prefix = nobj.mac_prefix
2485

    
2486
        return mac_prefix
2487

    
2488
      new_prefix = get_net_prefix(new_net_uuid)
2489
      old_prefix = get_net_prefix(old_net_uuid)
2490
      if old_prefix != new_prefix:
2491
        params[constants.INIC_MAC] = \
2492
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2493

    
2494
    # if there is a change in (ip, network) tuple
2495
    new_ip = params.get(constants.INIC_IP, old_ip)
2496
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2497
      if new_ip:
2498
        # if IP is pool then require a network and generate one IP
2499
        if new_ip.lower() == constants.NIC_IP_POOL:
2500
          if new_net_uuid:
2501
            try:
2502
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2503
            except errors.ReservationError:
2504
              raise errors.OpPrereqError("Unable to get a free IP"
2505
                                         " from the address pool",
2506
                                         errors.ECODE_STATE)
2507
            self.LogInfo("Chose IP %s from network %s",
2508
                         new_ip,
2509
                         new_net_obj.name)
2510
            params[constants.INIC_IP] = new_ip
2511
          else:
2512
            raise errors.OpPrereqError("ip=pool, but no network found",
2513
                                       errors.ECODE_INVAL)
2514
        # Reserve new IP if in the new network if any
2515
        elif new_net_uuid:
2516
          try:
2517
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2518
            self.LogInfo("Reserving IP %s in network %s",
2519
                         new_ip, new_net_obj.name)
2520
          except errors.ReservationError:
2521
            raise errors.OpPrereqError("IP %s not available in network %s" %
2522
                                       (new_ip, new_net_obj.name),
2523
                                       errors.ECODE_NOTUNIQUE)
2524
        # new network is None so check if new IP is a conflicting IP
2525
        elif self.op.conflicts_check:
2526
          _CheckForConflictingIp(self, new_ip, pnode)
2527

    
2528
      # release old IP if old network is not None
2529
      if old_ip and old_net_uuid:
2530
        try:
2531
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2532
        except errors.AddressPoolError:
2533
          logging.warning("Release IP %s not contained in network %s",
2534
                          old_ip, old_net_obj.name)
2535

    
2536
    # there are no changes in (ip, network) tuple and old network is not None
2537
    elif (old_net_uuid is not None and
2538
          (req_link is not None or req_mode is not None)):
2539
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2540
                                 " a NIC that is connected to a network",
2541
                                 errors.ECODE_INVAL)
2542

    
2543
    private.params = new_params
2544
    private.filled = new_filled_params
2545

    
2546
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    instance = self.instance
    pnode = instance.primary_node
    cluster = self.cluster
    if instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 instance.disk_template, errors.ECODE_INVAL)

    if (instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node == pnode:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node)
      CheckNodeNotDrained(self, self.op.remote_node)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

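  # self.diskmod (built below by _PrepareContainerMods from self.op.disks)
  # holds (op, index, params, private) tuples; e.g. roughly
  # (constants.DDM_ADD, -1, {"size": 1024}, None) would append a 1 GiB disk
  # (illustrative values only).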
  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    instance = self.instance
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodeNames(self.cfg, instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

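  # CheckPrereq only validates the request and precomputes the new parameter
  # dictionaries (self.hv_*, self.be_*, self.os_inst, self._new_nics, ...);
  # nothing is written to the configuration until Exec() runs.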
  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode = instance.primary_node

    self.warn = []

    if (self.op.pnode is not None and self.op.pnode != pnode and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
                                   errors.ECODE_STATE)

    assert pnode in self.owned_locks(locking.LEVEL_NODE)
    nodelist = list(instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
                                              instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
    be_old = cluster.FillBE(instance)

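    # Example (illustrative): a multi-entry mask such as "1:2:3" pins vCPUs
    # 0, 1 and 2 to physical CPUs 1, 2 and 3, so it only makes sense with
    # exactly three vCPUs; the checks below enforce this.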
    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
                                max_requested_cpu + 1, instance.hypervisor)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         [instance.hypervisor], False)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" % pnode)
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" % node,
                     prereq=True, ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" % node,
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" % node,
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node)
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(self, instance.primary_node,
                            "ballooning memory for instance %s" %
                            instance.name, delta, instance.hypervisor)

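    # Callbacks handed to _ApplyContainerMods below; at this stage they only
    # validate the requested NIC changes and reserve/release IPs, the real
    # NIC objects are only built later by _CreateNewNic/_ApplyNicMods.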
    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

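  # Called from Exec() via _DISK_CONVERSIONS once the instance's disks have
  # been shut down: new data and meta LVs are created on both nodes, the old
  # LVs are renamed into place and DRBD devices are layered on top and synced.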
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    assert instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     instance.name, pnode, [snode],
                                     disk_info, None, None, 0, feedback_fn,
                                     self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
    info = GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode, instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
          f_create = node == pnode
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
                               excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

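  # Reverse conversion: keep only the local data LV of each disk, return the
  # DRBD TCP ports to the pool and remove the now-unused meta and secondary
  # volumes.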
  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance

    assert len(instance.secondary_nodes) == 1
    assert instance.disk_template == constants.DT_DRBD8

    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
    new_disks = [d.children[0] for d in instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, instance.disks)
    self.cfg.Update(instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)

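  # _CreateNewDisk/_ModifyDisk/_RemoveDisk are the per-item callbacks used by
  # _ApplyContainerMods("disk", ...) in Exec(); each returns change
  # descriptions such as ("disk/0", "add:size=1024,mode=rw") (illustrative).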
  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    instance = self.instance

    # add a new disk
    if instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
                           instance.primary_node, instance.secondary_nodes,
                           [params], file_path, file_driver, idx,
                           self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
      self.cfg.SetDiskID(disk, node)
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx, node, msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

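  # NIC counterparts of the disk callbacks above; they are used by the second
  # _ApplyContainerMods("NIC", ...) pass in CheckPrereq and their change
  # descriptions are collected in self._nic_chgdesc for Exec and the hooks.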
  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

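  # Exec applies the changes prepared in CheckPrereq (new primary node,
  # runtime memory, disk and NIC modifications, disk template conversion,
  # hv/be/os parameters, offline state) and returns a list of
  # (parameter, new value) pairs, e.g. ("disk_template", "drbd"), which is
  # reported back to the user.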
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []
    instance = self.instance

    # New primary node
    if self.op.pnode:
      instance.primary_node = self.op.pnode

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
                                                     instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(instance.all_nodes)
        if self.op.remote_node:
          check_nodes.add(self.op.remote_node)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
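  """Moves an instance to another node group.

  Most of the work is delegated to the instance allocator; this LU mainly
  takes care of locking and of validating the requested target groups.

  """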
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_name
                        for group in lock_groups
                        for node_name in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

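  # The computed solution is not executed here: LoadNodeEvacResult turns the
  # iallocator result into opcodes and ResultWithJobs hands them back as jobs
  # to be submitted by the master daemon.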
  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)