lib/cmdlib/instance.py @ 1c3231aa
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
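# (i.e. an optional list of 2-tuples, each holding a non-empty change
# description and the new value)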


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
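        # expand the source node into (uuid, name), refreshing the local
        # src_node variable with the canonical node name at the same time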
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      self.cfg.GetNodeNames(node_whitelist))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
              d[constants.IDISK_MODE]) for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node_uuid = self.op.src_node_uuid
    src_path = self.op.src_path

    if src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = node
          self.op.src_node_uuid = src_node_uuid = \
            self.cfg.GetNodeInfoByName(node).uuid
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, src_node_uuid)
    result = self.rpc.call_export_info(src_node_uuid, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)
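
    # the nodes the instance would be placed on; reported as this LU's
    # dry-run result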
    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we have a chicken-and-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise
1249

    
1250
    feedback_fn("adding instance %s to cluster config" % instance)
1251

    
1252
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1253

    
1254
    # Declare that we don't want to remove the instance lock anymore, as we've
1255
    # added the instance to the config
1256
    del self.remove_locks[locking.LEVEL_INSTANCE]
1257

    
1258
    if self.op.mode == constants.INSTANCE_IMPORT:
1259
      # Release unused nodes
1260
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1261
    else:
1262
      # Release all nodes
1263
      ReleaseLocks(self, locking.LEVEL_NODE)
1264

    
1265
    disk_abort = False
1266
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1267
      feedback_fn("* wiping instance disks...")
1268
      try:
1269
        WipeDisks(self, iobj)
1270
      except errors.OpExecError, err:
1271
        logging.exception("Wiping disks failed")
1272
        self.LogWarning("Wiping instance disks failed (%s)", err)
1273
        disk_abort = True
1274

    
1275
    if disk_abort:
1276
      # Something is already wrong with the disks, don't do anything else
1277
      pass
1278
    elif self.op.wait_for_sync:
1279
      disk_abort = not WaitForSync(self, iobj)
1280
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1281
      # make sure the disks are not degraded (still sync-ing is ok)
1282
      feedback_fn("* checking mirrors status")
1283
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1284
    else:
1285
      disk_abort = False
1286

    
1287
    if disk_abort:
1288
      RemoveDisks(self, iobj)
1289
      self.cfg.RemoveInstance(iobj.name)
1290
      # Make sure the instance lock gets removed
1291
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1292
      raise errors.OpExecError("There are some degraded disks for"
1293
                               " this instance")
1294

    
1295
    # instance disks are now active
1296
    iobj.disks_active = True
1297

    
1298
    # Release all node resource locks
1299
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1300

    
1301
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1302
      # we need to set the disks ID to the primary node, since the
1303
      # preceding code might or might have not done it, depending on
1304
      # disk template and other options
1305
      for disk in iobj.disks:
1306
        self.cfg.SetDiskID(disk, self.pnode.uuid)
1307
      if self.op.mode == constants.INSTANCE_CREATE:
1308
        if not self.op.no_install:
1309
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1310
                        not self.op.wait_for_sync)
1311
          if pause_sync:
1312
            feedback_fn("* pausing disk sync to install instance OS")
1313
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1314
                                                              (iobj.disks,
1315
                                                               iobj), True)
1316
            for idx, success in enumerate(result.payload):
1317
              if not success:
1318
                logging.warn("pause-sync of instance %s for disk %d failed",
1319
                             instance, idx)
1320

    
1321
          feedback_fn("* running the instance OS create scripts...")
1322
          # FIXME: pass debug option from opcode to backend
1323
          os_add_result = \
1324
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1325
                                          self.op.debug_level)
1326
          if pause_sync:
1327
            feedback_fn("* resuming disk sync")
1328
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1329
                                                              (iobj.disks,
1330
                                                               iobj), False)
1331
            for idx, success in enumerate(result.payload):
1332
              if not success:
1333
                logging.warn("resume-sync of instance %s for disk %d failed",
1334
                             instance, idx)
1335

    
1336
          os_add_result.Raise("Could not add os for instance %s"
1337
                              " on node %s" % (instance, pnode_name))
1338

    
1339
      else:
1340
        if self.op.mode == constants.INSTANCE_IMPORT:
1341
          feedback_fn("* running the instance OS import scripts...")
1342

    
1343
          transfers = []
1344

    
1345
          for idx, image in enumerate(self.src_images):
1346
            if not image:
1347
              continue
1348

    
1349
            # FIXME: pass debug option from opcode to backend
1350
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1351
                                               constants.IEIO_FILE, (image, ),
1352
                                               constants.IEIO_SCRIPT,
1353
                                               (iobj.disks[idx], idx),
1354
                                               None)
1355
            transfers.append(dt)
1356

    
1357
          import_result = \
1358
            masterd.instance.TransferInstanceData(self, feedback_fn,
1359
                                                  self.op.src_node_uuid,
1360
                                                  self.pnode.uuid,
1361
                                                  self.pnode.secondary_ip,
1362
                                                  iobj, transfers)
1363
          if not compat.all(import_result):
1364
            self.LogWarning("Some disks for instance %s on node %s were not"
1365
                            " imported successfully" % (instance, pnode_name))
1366

    
1367
          rename_from = self._old_instance_name
1368

    
1369
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1370
          feedback_fn("* preparing remote import...")
1371
          # The source cluster will stop the instance before attempting to make
1372
          # a connection. In some cases stopping an instance can take a long
1373
          # time, hence the shutdown timeout is added to the connection
1374
          # timeout.
1375
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1376
                             self.op.source_shutdown_timeout)
1377
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1378

    
1379
          assert iobj.primary_node == self.pnode.uuid
1380
          disk_results = \
1381
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1382
                                          self.source_x509_ca,
1383
                                          self._cds, timeouts)
1384
          if not compat.all(disk_results):
1385
            # TODO: Should the instance still be started, even if some disks
1386
            # failed to import (valid for local imports, too)?
1387
            self.LogWarning("Some disks for instance %s on node %s were not"
1388
                            " imported successfully" % (instance, pnode_name))
1389

    
1390
          rename_from = self.source_instance_name
1391

    
1392
        else:
1393
          # also checked in the prereq part
1394
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1395
                                       % self.op.mode)
1396

    
1397
        # Run rename script on newly imported instance
1398
        assert iobj.name == instance
1399
        feedback_fn("Running rename script for %s" % instance)
1400
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1401
                                                   rename_from,
1402
                                                   self.op.debug_level)
1403
        result.Warn("Failed to run rename script for %s on node %s" %
1404
                    (instance, pnode_name), self.LogWarning)
1405

    
1406
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1407

    
1408
    if self.op.start:
1409
      iobj.admin_state = constants.ADMINST_UP
1410
      self.cfg.Update(iobj, feedback_fn)
1411
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1412
      feedback_fn("* starting instance...")
1413
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1414
                                            False, self.op.reason)
1415
      result.Raise("Could not start instance")
1416

    
1417
    return list(iobj.all_nodes)
1418

    
1419

    
1420
class LUInstanceRename(LogicalUnit):
1421
  """Rename an instance.
1422

1423
  """
1424
  HPATH = "instance-rename"
1425
  HTYPE = constants.HTYPE_INSTANCE
1426

    
1427
  def CheckArguments(self):
1428
    """Check arguments.
1429

1430
    """
1431
    if self.op.ip_check and not self.op.name_check:
1432
      # TODO: make the ip check more flexible and not depend on the name check
1433
      raise errors.OpPrereqError("IP address check requires a name check",
1434
                                 errors.ECODE_INVAL)
1435

    
1436
  def BuildHooksEnv(self):
1437
    """Build hooks env.
1438

1439
    This runs on master, primary and secondary nodes of the instance.
1440

1441
    """
1442
    env = BuildInstanceHookEnvByObject(self, self.instance)
1443
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1444
    return env
1445

    
1446
  def BuildHooksNodes(self):
1447
    """Build hooks nodes.
1448

1449
    """
1450
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1451
    return (nl, nl)
1452

    
1453
  def CheckPrereq(self):
1454
    """Check prerequisites.
1455

1456
    This checks that the instance is in the cluster and is not running.
1457

1458
    """
1459
    self.op.instance_name = ExpandInstanceName(self.cfg,
1460
                                               self.op.instance_name)
1461
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1462
    assert instance is not None
1463
    CheckNodeOnline(self, instance.primary_node)
1464
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1465
                       msg="cannot rename")
1466
    self.instance = instance
1467

    
1468
    new_name = self.op.new_name
1469
    if self.op.name_check:
1470
      hostname = _CheckHostnameSane(self, new_name)
1471
      new_name = self.op.new_name = hostname.name
1472
      if (self.op.ip_check and
1473
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1474
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1475
                                   (hostname.ip, new_name),
1476
                                   errors.ECODE_NOTUNIQUE)
1477

    
1478
    instance_list = self.cfg.GetInstanceList()
1479
    if new_name in instance_list and new_name != instance.name:
1480
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1481
                                 new_name, errors.ECODE_EXISTS)
1482

    
1483
  def Exec(self, feedback_fn):
1484
    """Rename the instance.
1485

1486
    """
1487
    inst = self.instance
1488
    old_name = inst.name
1489

    
1490
    rename_file_storage = False
1491
    if (inst.disk_template in constants.DTS_FILEBASED and
1492
        self.op.new_name != inst.name):
1493
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1494
      rename_file_storage = True
1495

    
1496
    self.cfg.RenameInstance(inst.name, self.op.new_name)
1497
    # Change the instance lock. This is definitely safe while we hold the BGL.
1498
    # Otherwise the new lock would have to be added in acquired mode.
1499
    assert self.REQ_BGL
1500
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1501
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1502
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1503

    
1504
    # re-read the instance from the configuration after rename
1505
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
1506

    
1507
    if rename_file_storage:
1508
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1509
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1510
                                                     old_file_storage_dir,
1511
                                                     new_file_storage_dir)
1512
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1513
                   " (but the instance has been renamed in Ganeti)" %
1514
                   (self.cfg.GetNodeName(inst.primary_node),
1515
                    old_file_storage_dir, new_file_storage_dir))
1516

    
1517
    StartInstanceDisks(self, inst, None)
1518
    # update info on disks
1519
    info = GetInstanceInfoText(inst)
1520
    for (idx, disk) in enumerate(inst.disks):
1521
      for node_uuid in inst.all_nodes:
1522
        self.cfg.SetDiskID(disk, node_uuid)
1523
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
1524
        result.Warn("Error setting info on node %s for disk %s" %
1525
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1526
    try:
1527
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1528
                                                 old_name, self.op.debug_level)
1529
      result.Warn("Could not run OS rename script for instance %s on node %s"
1530
                  " (but the instance has been renamed in Ganeti)" %
1531
                  (inst.name, self.cfg.GetNodeName(inst.primary_node)),
1532
                  self.LogWarning)
1533
    finally:
1534
      ShutdownInstanceDisks(self, inst)
1535

    
1536
    return inst.name
1537

    
1538

    
1539
class LUInstanceRemove(LogicalUnit):
1540
  """Remove an instance.
1541

1542
  """
1543
  HPATH = "instance-remove"
1544
  HTYPE = constants.HTYPE_INSTANCE
1545
  REQ_BGL = False
1546

    
1547
  def ExpandNames(self):
1548
    self._ExpandAndLockInstance()
1549
    self.needed_locks[locking.LEVEL_NODE] = []
1550
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1551
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1552

    
1553
  def DeclareLocks(self, level):
1554
    if level == locking.LEVEL_NODE:
1555
      self._LockInstancesNodes()
1556
    elif level == locking.LEVEL_NODE_RES:
1557
      # Copy node locks
1558
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1559
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1560

    
1561
  def BuildHooksEnv(self):
1562
    """Build hooks env.
1563

1564
    This runs on master, primary and secondary nodes of the instance.
1565

1566
    """
1567
    env = BuildInstanceHookEnvByObject(self, self.instance)
1568
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1569
    return env
1570

    
1571
  def BuildHooksNodes(self):
1572
    """Build hooks nodes.
1573

1574
    """
1575
    nl = [self.cfg.GetMasterNode()]
1576
    nl_post = list(self.instance.all_nodes) + nl
1577
    return (nl, nl_post)
1578

    
1579
  def CheckPrereq(self):
1580
    """Check prerequisites.
1581

1582
    This checks that the instance is in the cluster.
1583

1584
    """
1585
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1586
    assert self.instance is not None, \
1587
      "Cannot retrieve locked instance %s" % self.op.instance_name
1588

    
1589
  def Exec(self, feedback_fn):
1590
    """Remove the instance.
1591

1592
    """
1593
    instance = self.instance
1594
    logging.info("Shutting down instance %s on node %s",
1595
                 instance.name, self.cfg.GetNodeName(instance.primary_node))
1596

    
1597
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1598
                                             self.op.shutdown_timeout,
1599
                                             self.op.reason)
1600
    if self.op.ignore_failures:
1601
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1602
    else:
1603
      result.Raise("Could not shutdown instance %s on node %s" %
1604
                   (instance.name, self.cfg.GetNodeName(instance.primary_node)))
1605

    
1606
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1607
            self.owned_locks(locking.LEVEL_NODE_RES))
1608
    assert not (set(instance.all_nodes) -
1609
                self.owned_locks(locking.LEVEL_NODE)), \
1610
      "Not owning correct locks"
1611

    
1612
    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1613

    
1614

    
1615
class LUInstanceMove(LogicalUnit):
1616
  """Move an instance by data-copying.
1617

1618
  """
1619
  HPATH = "instance-move"
1620
  HTYPE = constants.HTYPE_INSTANCE
1621
  REQ_BGL = False
1622

    
1623
  def ExpandNames(self):
1624
    self._ExpandAndLockInstance()
1625
    (self.op.target_node_uuid, self.op.target_node) = \
1626
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1627
                            self.op.target_node)
1628
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
1629
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1630
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1631

    
1632
  def DeclareLocks(self, level):
1633
    if level == locking.LEVEL_NODE:
1634
      self._LockInstancesNodes(primary_only=True)
1635
    elif level == locking.LEVEL_NODE_RES:
1636
      # Copy node locks
1637
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1638
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1639

    
1640
  def BuildHooksEnv(self):
1641
    """Build hooks env.
1642

1643
    This runs on master, primary and secondary nodes of the instance.
1644

1645
    """
1646
    env = {
1647
      "TARGET_NODE": self.op.target_node,
1648
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1649
      }
1650
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1651
    return env
1652

    
1653
  def BuildHooksNodes(self):
1654
    """Build hooks nodes.
1655

1656
    """
1657
    nl = [
1658
      self.cfg.GetMasterNode(),
1659
      self.instance.primary_node,
1660
      self.op.target_node_uuid,
1661
      ]
1662
    return (nl, nl)
1663

    
1664
  def CheckPrereq(self):
1665
    """Check prerequisites.
1666

1667
    This checks that the instance is in the cluster.
1668

1669
    """
1670
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1671
    assert self.instance is not None, \
1672
      "Cannot retrieve locked instance %s" % self.op.instance_name
1673

    
1674
    if instance.disk_template not in constants.DTS_COPYABLE:
1675
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1676
                                 instance.disk_template, errors.ECODE_STATE)
1677

    
1678
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1679
    assert target_node is not None, \
1680
      "Cannot retrieve locked node %s" % self.op.target_node
1681

    
1682
    self.target_node_uuid = target_node.uuid
1683
    if target_node.uuid == instance.primary_node:
1684
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1685
                                 (instance.name, target_node.name),
1686
                                 errors.ECODE_STATE)
1687

    
1688
    bep = self.cfg.GetClusterInfo().FillBE(instance)
1689

    
1690
    for idx, dsk in enumerate(instance.disks):
1691
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1692
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1693
                                   " cannot copy" % idx, errors.ECODE_STATE)
1694

    
1695
    CheckNodeOnline(self, target_node.uuid)
1696
    CheckNodeNotDrained(self, target_node.uuid)
1697
    CheckNodeVmCapable(self, target_node.uuid)
1698
    cluster = self.cfg.GetClusterInfo()
1699
    group_info = self.cfg.GetNodeGroup(target_node.group)
1700
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1701
    CheckTargetNodeIPolicy(self, ipolicy, instance, target_node, self.cfg,
1702
                           ignore=self.op.ignore_ipolicy)
1703

    
1704
    if instance.admin_state == constants.ADMINST_UP:
1705
      # check memory requirements on the secondary node
1706
      CheckNodeFreeMemory(
1707
          self, target_node.uuid, "failing over instance %s" %
1708
          instance.name, bep[constants.BE_MAXMEM], instance.hypervisor,
1709
          self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
1710
    else:
1711
      self.LogInfo("Not checking memory on the secondary node as"
1712
                   " instance will not be started")
1713

    
1714
    # check bridge existance
1715
    CheckInstanceBridgesExist(self, instance, node_uuid=target_node.uuid)
1716

    
1717
  def Exec(self, feedback_fn):
1718
    """Move an instance.
1719

1720
    The move is done by shutting it down on its present node, copying
1721
    the data over (slow) and starting it on the new node.
1722

1723
    """
1724
    instance = self.instance
1725

    
1726
    source_node = self.cfg.GetNodeInfo(instance.primary_node)
1727
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1728

    
1729
    self.LogInfo("Shutting down instance %s on source node %s",
1730
                 instance.name, source_node.name)
1731

    
1732
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1733
            self.owned_locks(locking.LEVEL_NODE_RES))
1734

    
1735
    result = self.rpc.call_instance_shutdown(source_node.uuid, instance,
1736
                                             self.op.shutdown_timeout,
1737
                                             self.op.reason)
1738
    if self.op.ignore_consistency:
1739
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1740
                  " anyway. Please make sure node %s is down. Error details" %
1741
                  (instance.name, source_node.name, source_node.name),
1742
                  self.LogWarning)
1743
    else:
1744
      result.Raise("Could not shutdown instance %s on node %s" %
1745
                   (instance.name, source_node.name))
1746

    
1747
    # create the target disks
1748
    try:
1749
      CreateDisks(self, instance, target_node_uuid=target_node.uuid)
1750
    except errors.OpExecError:
1751
      self.LogWarning("Device creation failed")
1752
      self.cfg.ReleaseDRBDMinors(instance.name)
1753
      raise
1754

    
1755
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1756

    
1757
    errs = []
1758
    # activate, get path, copy the data over
1759
    for idx, disk in enumerate(instance.disks):
1760
      self.LogInfo("Copying data for disk %d", idx)
1761
      result = self.rpc.call_blockdev_assemble(target_node.uuid,
1762
                                               (disk, instance), instance.name,
1763
                                               True, idx)
1764
      if result.fail_msg:
1765
        self.LogWarning("Can't assemble newly created disk %d: %s",
1766
                        idx, result.fail_msg)
1767
        errs.append(result.fail_msg)
1768
        break
1769
      dev_path = result.payload
1770
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk, instance),
1771
                                             target_node.name, dev_path,
1772
                                             cluster_name)
1773
      if result.fail_msg:
1774
        self.LogWarning("Can't copy data over for disk %d: %s",
1775
                        idx, result.fail_msg)
1776
        errs.append(result.fail_msg)
1777
        break
1778

    
1779
    if errs:
1780
      self.LogWarning("Some disks failed to copy, aborting")
1781
      try:
1782
        RemoveDisks(self, instance, target_node_uuid=target_node.uuid)
1783
      finally:
1784
        self.cfg.ReleaseDRBDMinors(instance.name)
1785
        raise errors.OpExecError("Errors during disk copy: %s" %
1786
                                 (",".join(errs),))
1787

    
1788
    instance.primary_node = target_node.uuid
1789
    self.cfg.Update(instance, feedback_fn)
1790

    
1791
    self.LogInfo("Removing the disks on the original node")
1792
    RemoveDisks(self, instance, target_node_uuid=source_node.uuid)
1793

    
1794
    # Only start the instance if it's marked as up
1795
    if instance.admin_state == constants.ADMINST_UP:
1796
      self.LogInfo("Starting instance %s on node %s",
1797
                   instance.name, target_node.name)
1798

    
1799
      disks_ok, _ = AssembleInstanceDisks(self, instance,
1800
                                          ignore_secondaries=True)
1801
      if not disks_ok:
1802
        ShutdownInstanceDisks(self, instance)
1803
        raise errors.OpExecError("Can't activate the instance's disks")
1804

    
1805
      result = self.rpc.call_instance_start(target_node.uuid,
1806
                                            (instance, None, None), False,
1807
                                            self.op.reason)
1808
      msg = result.fail_msg
1809
      if msg:
1810
        ShutdownInstanceDisks(self, instance)
1811
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1812
                                 (instance.name, target_node.name, msg))
1813

    
1814

    
1815
class LUInstanceMultiAlloc(NoHooksLU):
1816
  """Allocates multiple instances at the same time.
1817

1818
  """
1819
  REQ_BGL = False
1820

    
1821
  def CheckArguments(self):
1822
    """Check arguments.
1823

1824
    """
1825
    nodes = []
1826
    for inst in self.op.instances:
1827
      if inst.iallocator is not None:
1828
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1829
                                   " instance objects", errors.ECODE_INVAL)
1830
      nodes.append(bool(inst.pnode))
1831
      if inst.disk_template in constants.DTS_INT_MIRROR:
1832
        nodes.append(bool(inst.snode))
1833

    
1834
    has_nodes = compat.any(nodes)
1835
    if compat.all(nodes) ^ has_nodes:
1836
      raise errors.OpPrereqError("There are instance objects providing"
1837
                                 " pnode/snode while others do not",
1838
                                 errors.ECODE_INVAL)
1839

    
1840
    if self.op.iallocator is None:
1841
      default_iallocator = self.cfg.GetDefaultIAllocator()
1842
      if default_iallocator and has_nodes:
1843
        self.op.iallocator = default_iallocator
1844
      else:
1845
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1846
                                   " given and no cluster-wide default"
1847
                                   " iallocator found; please specify either"
1848
                                   " an iallocator or nodes on the instances"
1849
                                   " or set a cluster-wide default iallocator",
1850
                                   errors.ECODE_INVAL)
1851

    
1852
    _CheckOpportunisticLocking(self.op)
1853

    
1854
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1855
    if dups:
1856
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1857
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1858

    
1859
  def ExpandNames(self):
1860
    """Calculate the locks.
1861

1862
    """
1863
    self.share_locks = ShareAll()
1864
    self.needed_locks = {
1865
      # iallocator will select nodes and even if no iallocator is used,
1866
      # collisions with LUInstanceCreate should be avoided
1867
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1868
      }
1869

    
1870
    if self.op.iallocator:
1871
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1872
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1873

    
1874
      if self.op.opportunistic_locking:
1875
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1876
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1877
    else:
1878
      nodeslist = []
1879
      for inst in self.op.instances:
1880
        (inst.pnode_uuid, inst.pnode) = \
1881
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1882
        nodeslist.append(inst.pnode)
1883
        if inst.snode is not None:
1884
          (inst.snode_uuid, inst.snode) = \
1885
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1886
          nodeslist.append(inst.snode)
1887

    
1888
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1889
      # Lock resources of instance's primary and secondary nodes (copy to
1890
      # prevent accidential modification)
1891
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1892

    
1893
  def CheckPrereq(self):
1894
    """Check prerequisite.
1895

1896
    """
1897
    cluster = self.cfg.GetClusterInfo()
1898
    default_vg = self.cfg.GetVGName()
1899
    ec_id = self.proc.GetECId()
1900

    
1901
    if self.op.opportunistic_locking:
1902
      # Only consider nodes for which a lock is held
1903
      node_whitelist = self.cfg.GetNodeNames(
1904
                         list(self.owned_locks(locking.LEVEL_NODE)))
1905
    else:
1906
      node_whitelist = None
1907

    
1908
    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1909
                                         _ComputeNics(op, cluster, None,
1910
                                                      self.cfg, ec_id),
1911
                                         _ComputeFullBeParams(op, cluster),
1912
                                         node_whitelist)
1913
             for op in self.op.instances]
1914

    
1915
    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1916
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1917

    
1918
    ial.Run(self.op.iallocator)
1919

    
1920
    if not ial.success:
1921
      raise errors.OpPrereqError("Can't compute nodes using"
1922
                                 " iallocator '%s': %s" %
1923
                                 (self.op.iallocator, ial.info),
1924
                                 errors.ECODE_NORES)
1925

    
1926
    self.ia_result = ial.result
1927

    
1928
    if self.op.dry_run:
1929
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1930
        constants.JOB_IDS_KEY: [],
1931
        })
1932

    
1933
  def _ConstructPartialResult(self):
1934
    """Contructs the partial result.
1935

1936
    """
1937
    (allocatable, failed) = self.ia_result
1938
    return {
1939
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1940
        map(compat.fst, allocatable),
1941
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1942
      }
1943

    
1944
  def Exec(self, feedback_fn):
1945
    """Executes the opcode.
1946

1947
    """
1948
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
1949
    (allocatable, failed) = self.ia_result
1950

    
1951
    jobs = []
1952
    for (name, node_names) in allocatable:
1953
      op = op2inst.pop(name)
1954

    
1955
      (op.pnode_uuid, op.pnode) = \
1956
        ExpandNodeUuidAndName(self.cfg, None, node_names[0])
1957
      if len(node_names) > 1:
1958
        (op.snode_uuid, op.snode) = \
1959
          ExpandNodeUuidAndName(self.cfg, None, node_names[1])
1960

    
1961
      jobs.append([op])
1962

    
1963
    missing = set(op2inst.keys()) - set(failed)
1964
    assert not missing, \
1965
      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
1966

    
1967
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
1968

    
1969

    
1970
class _InstNicModPrivate:
1971
  """Data structure for network interface modifications.
1972

1973
  Used by L{LUInstanceSetParams}.
1974

1975
  """
1976
  def __init__(self):
1977
    self.params = None
1978
    self.filled = None
1979

    
1980

    
1981
def _PrepareContainerMods(mods, private_fn):
1982
  """Prepares a list of container modifications by adding a private data field.
1983

1984
  @type mods: list of tuples; (operation, index, parameters)
1985
  @param mods: List of modifications
1986
  @type private_fn: callable or None
1987
  @param private_fn: Callable for constructing a private data field for a
1988
    modification
1989
  @rtype: list
1990

1991
  """
1992
  if private_fn is None:
1993
    fn = lambda: None
1994
  else:
1995
    fn = private_fn
1996

    
1997
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
1998

    
1999

    
2000
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2001
  """Checks if nodes have enough physical CPUs
2002

2003
  This function checks if all given nodes have the needed number of
2004
  physical CPUs. In case any node has less CPUs or we cannot get the
2005
  information from the node, this function raises an OpPrereqError
2006
  exception.
2007

2008
  @type lu: C{LogicalUnit}
2009
  @param lu: a logical unit from which we get configuration data
2010
  @type node_uuids: C{list}
2011
  @param node_uuids: the list of node UUIDs to check
2012
  @type requested: C{int}
2013
  @param requested: the minimum acceptable number of physical CPUs
2014
  @type hypervisor_specs: list of pairs (string, dict of strings)
2015
  @param hypervisor_specs: list of hypervisor specifications in
2016
      pairs (hypervisor_name, hvparams)
2017
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2018
      or we cannot check the node
2019

2020
  """
2021
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs, None)
2022
  for node_uuid in node_uuids:
2023
    info = nodeinfo[node_uuid]
2024
    node_name = lu.cfg.GetNodeName(node_uuid)
2025
    info.Raise("Cannot get current information from node %s" % node_name,
2026
               prereq=True, ecode=errors.ECODE_ENVIRON)
2027
    (_, _, (hv_info, )) = info.payload
2028
    num_cpus = hv_info.get("cpu_total", None)
2029
    if not isinstance(num_cpus, int):
2030
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2031
                                 " on node %s, result was '%s'" %
2032
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2033
    if requested > num_cpus:
2034
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2035
                                 "required" % (node_name, num_cpus, requested),
2036
                                 errors.ECODE_NORES)
2037

    
2038

    
2039
def GetItemFromContainer(identifier, kind, container):
2040
  """Return the item refered by the identifier.
2041

2042
  @type identifier: string
2043
  @param identifier: Item index or name or UUID
2044
  @type kind: string
2045
  @param kind: One-word item description
2046
  @type container: list
2047
  @param container: Container to get the item from
2048

2049
  """
2050
  # Index
2051
  try:
2052
    idx = int(identifier)
2053
    if idx == -1:
2054
      # Append
2055
      absidx = len(container) - 1
2056
    elif idx < 0:
2057
      raise IndexError("Not accepting negative indices other than -1")
2058
    elif idx > len(container):
2059
      raise IndexError("Got %s index %s, but there are only %s" %
2060
                       (kind, idx, len(container)))
2061
    else:
2062
      absidx = idx
2063
    return (absidx, container[idx])
2064
  except ValueError:
2065
    pass
2066

    
2067
  for idx, item in enumerate(container):
2068
    if item.uuid == identifier or item.name == identifier:
2069
      return (idx, item)
2070

    
2071
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2072
                             (kind, identifier), errors.ECODE_NOENT)
2073

    
2074

    
2075
def _ApplyContainerMods(kind, container, chgdesc, mods,
2076
                        create_fn, modify_fn, remove_fn):
2077
  """Applies descriptions in C{mods} to C{container}.
2078

2079
  @type kind: string
2080
  @param kind: One-word item description
2081
  @type container: list
2082
  @param container: Container to modify
2083
  @type chgdesc: None or list
2084
  @param chgdesc: List of applied changes
2085
  @type mods: list
2086
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2087
  @type create_fn: callable
2088
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2089
    receives absolute item index, parameters and private data object as added
2090
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2091
    as list
2092
  @type modify_fn: callable
2093
  @param modify_fn: Callback for modifying an existing item
2094
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2095
    and private data object as added by L{_PrepareContainerMods}, returns
2096
    changes as list
2097
  @type remove_fn: callable
2098
  @param remove_fn: Callback on removing item; receives absolute item index,
2099
    item and private data object as added by L{_PrepareContainerMods}
2100

2101
  """
2102
  for (op, identifier, params, private) in mods:
2103
    changes = None
2104

    
2105
    if op == constants.DDM_ADD:
2106
      # Calculate where item will be added
2107
      # When adding an item, identifier can only be an index
2108
      try:
2109
        idx = int(identifier)
2110
      except ValueError:
2111
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2112
                                   " identifier for %s" % constants.DDM_ADD,
2113
                                   errors.ECODE_INVAL)
2114
      if idx == -1:
2115
        addidx = len(container)
2116
      else:
2117
        if idx < 0:
2118
          raise IndexError("Not accepting negative indices other than -1")
2119
        elif idx > len(container):
2120
          raise IndexError("Got %s index %s, but there are only %s" %
2121
                           (kind, idx, len(container)))
2122
        addidx = idx
2123

    
2124
      if create_fn is None:
2125
        item = params
2126
      else:
2127
        (item, changes) = create_fn(addidx, params, private)
2128

    
2129
      if idx == -1:
2130
        container.append(item)
2131
      else:
2132
        assert idx >= 0
2133
        assert idx <= len(container)
2134
        # list.insert does so before the specified index
2135
        container.insert(idx, item)
2136
    else:
2137
      # Retrieve existing item
2138
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2139

    
2140
      if op == constants.DDM_REMOVE:
2141
        assert not params
2142

    
2143
        if remove_fn is not None:
2144
          remove_fn(absidx, item, private)
2145

    
2146
        changes = [("%s/%s" % (kind, absidx), "remove")]
2147

    
2148
        assert container[absidx] == item
2149
        del container[absidx]
2150
      elif op == constants.DDM_MODIFY:
2151
        if modify_fn is not None:
2152
          changes = modify_fn(absidx, item, params, private)
2153
      else:
2154
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2155

    
2156
    assert _TApplyContModsCbChanges(changes)
2157

    
2158
    if not (chgdesc is None or changes is None):
2159
      chgdesc.extend(changes)
2160

    
2161

    
2162
def _UpdateIvNames(base_index, disks):
2163
  """Updates the C{iv_name} attribute of disks.
2164

2165
  @type disks: list of L{objects.Disk}
2166

2167
  """
2168
  for (idx, disk) in enumerate(disks):
2169
    disk.iv_name = "disk/%s" % (base_index + idx, )
2170

    
2171

    
2172
class LUInstanceSetParams(LogicalUnit):
2173
  """Modifies an instances's parameters.
2174

2175
  """
2176
  HPATH = "instance-modify"
2177
  HTYPE = constants.HTYPE_INSTANCE
2178
  REQ_BGL = False
2179

    
2180
  @staticmethod
2181
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2182
    assert ht.TList(mods)
2183
    assert not mods or len(mods[0]) in (2, 3)
2184

    
2185
    if mods and len(mods[0]) == 2:
2186
      result = []
2187

    
2188
      addremove = 0
2189
      for op, params in mods:
2190
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2191
          result.append((op, -1, params))
2192
          addremove += 1
2193

    
2194
          if addremove > 1:
2195
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2196
                                       " supported at a time" % kind,
2197
                                       errors.ECODE_INVAL)
2198
        else:
2199
          result.append((constants.DDM_MODIFY, op, params))
2200

    
2201
      assert verify_fn(result)
2202
    else:
2203
      result = mods
2204

    
2205
    return result
2206

    
2207
  @staticmethod
2208
  def _CheckMods(kind, mods, key_types, item_fn):
2209
    """Ensures requested disk/NIC modifications are valid.
2210

2211
    """
2212
    for (op, _, params) in mods:
2213
      assert ht.TDict(params)
2214

    
2215
      # If 'key_types' is an empty dict, we assume we have an
2216
      # 'ext' template and thus do not ForceDictType
2217
      if key_types:
2218
        utils.ForceDictType(params, key_types)
2219

    
2220
      if op == constants.DDM_REMOVE:
2221
        if params:
2222
          raise errors.OpPrereqError("No settings should be passed when"
2223
                                     " removing a %s" % kind,
2224
                                     errors.ECODE_INVAL)
2225
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2226
        item_fn(op, params)
2227
      else:
2228
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2229

    
2230
  @staticmethod
2231
  def _VerifyDiskModification(op, params, excl_stor):
2232
    """Verifies a disk modification.
2233

2234
    """
2235
    if op == constants.DDM_ADD:
2236
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2237
      if mode not in constants.DISK_ACCESS_SET:
2238
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2239
                                   errors.ECODE_INVAL)
2240

    
2241
      size = params.get(constants.IDISK_SIZE, None)
2242
      if size is None:
2243
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2244
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2245

    
2246
      try:
2247
        size = int(size)
2248
      except (TypeError, ValueError), err:
2249
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2250
                                   errors.ECODE_INVAL)
2251

    
2252
      params[constants.IDISK_SIZE] = size
2253
      name = params.get(constants.IDISK_NAME, None)
2254
      if name is not None and name.lower() == constants.VALUE_NONE:
2255
        params[constants.IDISK_NAME] = None
2256

    
2257
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2258

    
2259
    elif op == constants.DDM_MODIFY:
2260
      if constants.IDISK_SIZE in params:
2261
        raise errors.OpPrereqError("Disk size change not possible, use"
2262
                                   " grow-disk", errors.ECODE_INVAL)
2263
      if len(params) > 2:
2264
        raise errors.OpPrereqError("Disk modification doesn't support"
2265
                                   " additional arbitrary parameters",
2266
                                   errors.ECODE_INVAL)
2267
      name = params.get(constants.IDISK_NAME, None)
2268
      if name is not None and name.lower() == constants.VALUE_NONE:
2269
        params[constants.IDISK_NAME] = None
2270

    
2271
  @staticmethod
2272
  def _VerifyNicModification(op, params):
2273
    """Verifies a network interface modification.
2274

2275
    """
2276
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2277
      ip = params.get(constants.INIC_IP, None)
2278
      name = params.get(constants.INIC_NAME, None)
2279
      req_net = params.get(constants.INIC_NETWORK, None)
2280
      link = params.get(constants.NIC_LINK, None)
2281
      mode = params.get(constants.NIC_MODE, None)
2282
      if name is not None and name.lower() == constants.VALUE_NONE:
2283
        params[constants.INIC_NAME] = None
2284
      if req_net is not None:
2285
        if req_net.lower() == constants.VALUE_NONE:
2286
          params[constants.INIC_NETWORK] = None
2287
          req_net = None
2288
        elif link is not None or mode is not None:
2289
          raise errors.OpPrereqError("If network is given"
2290
                                     " mode or link should not",
2291
                                     errors.ECODE_INVAL)
2292

    
2293
      if op == constants.DDM_ADD:
2294
        macaddr = params.get(constants.INIC_MAC, None)
2295
        if macaddr is None:
2296
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2297

    
2298
      if ip is not None:
2299
        if ip.lower() == constants.VALUE_NONE:
2300
          params[constants.INIC_IP] = None
2301
        else:
2302
          if ip.lower() == constants.NIC_IP_POOL:
2303
            if op == constants.DDM_ADD and req_net is None:
2304
              raise errors.OpPrereqError("If ip=pool, parameter network"
2305
                                         " cannot be none",
2306
                                         errors.ECODE_INVAL)
2307
          else:
2308
            if not netutils.IPAddress.IsValid(ip):
2309
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2310
                                         errors.ECODE_INVAL)
2311

    
2312
      if constants.INIC_MAC in params:
2313
        macaddr = params[constants.INIC_MAC]
2314
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2315
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2316

    
2317
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2318
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2319
                                     " modifying an existing NIC",
2320
                                     errors.ECODE_INVAL)
2321

    
2322
  def CheckArguments(self):
2323
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2324
            self.op.hvparams or self.op.beparams or self.op.os_name or
2325
            self.op.offline is not None or self.op.runtime_mem or
2326
            self.op.pnode):
2327
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2328

    
2329
    if self.op.hvparams:
2330
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2331
                           "hypervisor", "instance", "cluster")
2332

    
2333
    self.op.disks = self._UpgradeDiskNicMods(
2334
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2335
    self.op.nics = self._UpgradeDiskNicMods(
2336
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2337

    
2338
    if self.op.disks and self.op.disk_template is not None:
2339
      raise errors.OpPrereqError("Disk template conversion and other disk"
2340
                                 " changes not supported at the same time",
2341
                                 errors.ECODE_INVAL)
2342

    
2343
    if (self.op.disk_template and
2344
        self.op.disk_template in constants.DTS_INT_MIRROR and
2345
        self.op.remote_node is None):
2346
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2347
                                 " one requires specifying a secondary node",
2348
                                 errors.ECODE_INVAL)
2349

    
2350
    # Check NIC modifications
2351
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2352
                    self._VerifyNicModification)
2353

    
2354
    if self.op.pnode:
2355
      (self.op.pnode_uuid, self.op.pnode) = \
2356
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2357

    
2358
  def ExpandNames(self):
2359
    self._ExpandAndLockInstance()
2360
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2361
    # Can't even acquire node locks in shared mode as upcoming changes in
2362
    # Ganeti 2.6 will start to modify the node object on disk conversion
2363
    self.needed_locks[locking.LEVEL_NODE] = []
2364
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2365
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2366
    # Look node group to look up the ipolicy
2367
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2368

    
2369
  def DeclareLocks(self, level):
2370
    if level == locking.LEVEL_NODEGROUP:
2371
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2372
      # Acquire locks for the instance's nodegroups optimistically. Needs
2373
      # to be verified in CheckPrereq
2374
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2375
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2376
    elif level == locking.LEVEL_NODE:
2377
      self._LockInstancesNodes()
2378
      if self.op.disk_template and self.op.remote_node:
2379
        (self.op.remote_node_uuid, self.op.remote_node) = \
2380
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2381
                                self.op.remote_node)
2382
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2383
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2384
      # Copy node locks
2385
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2386
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2387

    
2388
  def BuildHooksEnv(self):
2389
    """Build hooks env.
2390

2391
    This runs on the master, primary and secondaries.
2392

2393
    """
2394
    args = {}
2395
    if constants.BE_MINMEM in self.be_new:
2396
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2397
    if constants.BE_MAXMEM in self.be_new:
2398
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2399
    if constants.BE_VCPUS in self.be_new:
2400
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2401
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2402
    # information at all.
2403

    
2404
    if self._new_nics is not None:
2405
      nics = []
2406

    
2407
      for nic in self._new_nics:
2408
        n = copy.deepcopy(nic)
2409
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2410
        n.nicparams = nicparams
2411
        nics.append(NICToTuple(self, n))
2412

    
2413
      args["nics"] = nics
2414

    
2415
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2416
    if self.op.disk_template:
2417
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2418
    if self.op.runtime_mem:
2419
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2420

    
2421
    return env
2422

    
2423
  def BuildHooksNodes(self):
2424
    """Build hooks nodes.
2425

2426
    """
2427
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2428
    return (nl, nl)
2429

    
2430
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2431
                              old_params, cluster, pnode_uuid):
2432

    
2433
    update_params_dict = dict([(key, params[key])
2434
                               for key in constants.NICS_PARAMETERS
2435
                               if key in params])
2436

    
2437
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2438
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2439

    
2440
    new_net_uuid = None
2441
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2442
    if new_net_uuid_or_name:
2443
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2444
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2445

    
2446
    if old_net_uuid:
2447
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2448

    
2449
    if new_net_uuid:
2450
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2451
      if not netparams:
2452
        raise errors.OpPrereqError("No netparams found for the network"
2453
                                   " %s, probably not connected" %
2454
                                   new_net_obj.name, errors.ECODE_INVAL)
2455
      new_params = dict(netparams)
2456
    else:
2457
      new_params = GetUpdatedParams(old_params, update_params_dict)
2458

    
2459
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2460

    
2461
    new_filled_params = cluster.SimpleFillNIC(new_params)
2462
    objects.NIC.CheckParameterSyntax(new_filled_params)
2463

    
2464
    new_mode = new_filled_params[constants.NIC_MODE]
2465
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

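  # The two helpers below factor the heavier prerequisite checks out of
  # CheckPrereq: _PreCheckDiskTemplate validates a requested disk template
  # conversion, _PreCheckDisks validates the list of disk modifications.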
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    instance = self.instance
    pnode_uuid = instance.primary_node
    cluster = self.cluster
    if instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 instance.disk_template, errors.ECODE_INVAL)

    if (instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    instance = self.instance
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg, instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, instance.name, instance.hypervisor, instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
                                              instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
    be_old = cluster.FillBE(instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(instance.hypervisor,
                    self.cfg.GetClusterInfo().hvparams[instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

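    # If the maximum memory is being increased and --force was not given,
    # verify that the primary node (and, when auto_balance is enabled, each
    # secondary node) still has enough free memory for the instance.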
    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, instance.name, instance.hypervisor, instance.hvparams)
      hvspecs = [(instance.hypervisor, cluster.hvparams[instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs, False)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         instance.primary_node, instance.name, instance.hypervisor,
         instance.hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB unless --force is given" %
                                   (instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, instance.primary_node, "ballooning memory for instance %s" %
            instance.name, delta, instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[instance.hypervisor])

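    # Per-item callbacks for _ApplyContainerMods below: they validate NIC
    # additions and modifications via _PrepareNicModification and release a
    # NIC's IP on removal, all while still in the prerequisite phase.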
    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

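  # Disk template conversion helpers; Exec dispatches to them through the
  # _DISK_CONVERSIONS map after shutting down the instance's disks.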
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode_uuid = instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     instance.name, pnode_uuid, [snode_uuid],
                                     disk_info, None, None, 0, feedback_fn,
                                     self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, instance, disk, info, f_create,
                               excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

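  # The reverse conversion: keep only each DRBD disk's first child (the data
  # LV), return the DRBD ports to the pool and remove the now unused volumes
  # on both nodes.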
  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance

    assert len(instance.secondary_nodes) == 1
    assert instance.disk_template == constants.DT_DRBD8

    pnode_uuid = instance.primary_node
    snode_uuid = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
    new_disks = [d.children[0] for d in instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, instance.disks)
    self.cfg.Update(instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    instance = self.instance

    # add a new disk
    if instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
                           instance.primary_node, instance.secondary_nodes,
                           [params], file_path, file_driver, idx,
                           self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []
    instance = self.instance

    # New primary node
    if self.op.pnode_uuid:
      instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
                                                     instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

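  # Supported disk template conversions: maps a (current, requested) template
  # pair to the method performing the conversion; _PreCheckDiskTemplate
  # rejects pairs not listed here and Exec dispatches to the matching handler.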
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


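# LUInstanceChangeGroup asks the configured iallocator for a plan to move an
# instance to one of the requested (or any other available) node groups and
# submits the resulting jobs.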
class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)