lib/cmdlib/instance.py @ 5831baf3

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


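# Illustrative sketch, not part of the original module: how the prefix rule
# above plays out. Assuming "web1" resolves to "web1.example.com",
#
#   hostname = _CheckHostnameSane(lu, "web1")
#
# succeeds, because the given name matches the first component of the
# resolved name; a name resolving to an unrelated FQDN raises
# errors.OpPrereqError with errors.ECODE_INVAL instead.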
def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)


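# Illustrative sketch, not part of the original module: the kind of data the
# wrapper above expects (example values only).
#
#   req = _CreateInstanceAllocRequest(
#     op,                                  # the OpInstanceCreate opcode
#     [{constants.IDISK_SIZE: 10240}],     # computed disks
#     nics,                                # list of objects.NIC
#     {constants.BE_VCPUS: 2,
#      constants.BE_MAXMEM: 1024,
#      constants.BE_SPINDLE_USE: 1},       # fully filled beparams
#     None)                                # node_whitelist: no restriction
#
# The resulting request is later handed to iallocator.IAllocator, see
# LUInstanceCreate._RunAllocator below.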
def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)


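# Illustrative sketch, not part of the original module: assuming cluster
# defaults of {"maxmem": 1024, "minmem": 1024, "vcpus": 1}, an opcode carrying
# {"vcpus": constants.VALUE_AUTO, "maxmem": 2048} has its "auto" value
# replaced by the cluster default, is upgraded/type-checked, and the merge via
# SimpleFillBE then yields a beparams dict with vcpus=1 and maxmem=2048.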
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s) dimara
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


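# Illustrative sketch, not part of the original module: a NIC specification as
# it may arrive in op.nics and what the loop above makes of it.
#
#   op.nics = [{constants.INIC_IP: "pool",
#               constants.INIC_NETWORK: "mynet"}]
#
# keeps the literal "pool" marker as the IP (resolution is deferred until the
# primary node is known, see LUInstanceCreate.CheckPrereq), resolves the
# network name to its UUID via cfg.LookupNetwork, leaves the MAC as "auto"
# (generated later), and returns one objects.NIC with a freshly generated
# uuid.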
def _CheckForConflictingIp(lu, ip, node):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


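# Illustrative sketch, not part of the original module: the instance_spec dict
# checked above has the same shape as the ispec built in
# LUInstanceCreate.CheckPrereq, e.g.
#
#   instance_spec = {
#     constants.ISPEC_MEM_SIZE: 1024,
#     constants.ISPEC_CPU_COUNT: 2,
#     constants.ISPEC_DISK_COUNT: 1,
#     constants.ISPEC_DISK_SIZE: [10240],
#     constants.ISPEC_NIC_COUNT: 1,
#     constants.ISPEC_SPINDLE_USE: 1,
#     }
#
# An empty result means the spec fits the policy; a non-empty list names the
# violated limits.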
def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


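# Illustrative sketch, not part of the original module: OS variants are the
# part after the "+" separator. For an OS whose supported_variants is
# ["default", "minimal"], "debootstrap+minimal" passes the check above, a bare
# "debootstrap" fails with "OS name must include a variant", and
# "debootstrap+foo" fails with "Unsupported OS variant".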
class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_DEFAULT

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice than to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

579
    if level == locking.LEVEL_NODE_RES and \
580
      self.opportunistic_locks[locking.LEVEL_NODE]:
581
      # Even when using opportunistic locking, we require the same set of
582
      # NODE_RES locks as we got NODE locks
583
      self.needed_locks[locking.LEVEL_NODE_RES] = \
584
        self.owned_locks(locking.LEVEL_NODE)
585

    
586
  def _RunAllocator(self):
587
    """Run the allocator based on input opcode.
588

589
    """
590
    if self.op.opportunistic_locking:
591
      # Only consider nodes for which a lock is held
592
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
593
    else:
594
      node_whitelist = None
595

    
596
    #TODO Export network to iallocator so that it chooses a pnode
597
    #     in a nodegroup that has the desired network connected to
598
    req = _CreateInstanceAllocRequest(self.op, self.disks,
599
                                      self.nics, self.be_full,
600
                                      node_whitelist)
601
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
602

    
603
    ial.Run(self.op.iallocator)
604

    
605
    if not ial.success:
606
      # When opportunistic locks are used only a temporary failure is generated
607
      if self.op.opportunistic_locking:
608
        ecode = errors.ECODE_TEMP_NORES
609
      else:
610
        ecode = errors.ECODE_NORES
611

    
612
      raise errors.OpPrereqError("Can't compute nodes using"
613
                                 " iallocator '%s': %s" %
614
                                 (self.op.iallocator, ial.info),
615
                                 ecode)
616

    
617
    self.op.pnode = ial.result[0]
618
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
619
                 self.op.instance_name, self.op.iallocator,
620
                 utils.CommaJoin(ial.result))
621

    
622
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
623

    
624
    if req.RequiredNodes() == 2:
625
      self.op.snode = ial.result[1]
626

    
627
  def BuildHooksEnv(self):
628
    """Build hooks env.
629

630
    This runs on master, primary and secondary nodes of the instance.
631

632
    """
633
    env = {
634
      "ADD_MODE": self.op.mode,
635
      }
636
    if self.op.mode == constants.INSTANCE_IMPORT:
637
      env["SRC_NODE"] = self.op.src_node
638
      env["SRC_PATH"] = self.op.src_path
639
      env["SRC_IMAGES"] = self.src_images
640

    
641
    env.update(BuildInstanceHookEnv(
642
      name=self.op.instance_name,
643
      primary_node=self.op.pnode,
644
      secondary_nodes=self.secondaries,
645
      status=self.op.start,
646
      os_type=self.op.os_type,
647
      minmem=self.be_full[constants.BE_MINMEM],
648
      maxmem=self.be_full[constants.BE_MAXMEM],
649
      vcpus=self.be_full[constants.BE_VCPUS],
650
      nics=NICListToTuple(self, self.nics),
651
      disk_template=self.op.disk_template,
652
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
653
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE], {})
654
             for d in self.disks],
655
      bep=self.be_full,
656
      hvp=self.hv_full,
657
      hypervisor_name=self.op.hypervisor,
658
      tags=self.op.tags,
659
      ))
660

    
661
    return env
662

    
663
  def BuildHooksNodes(self):
664
    """Build hooks nodes.
665

666
    """
667
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
668
    return nl, nl
669

    
670
  def _ReadExportInfo(self):
671
    """Reads the export information from disk.
672

673
    It will override the opcode source node and path with the actual
674
    information, if these two were not specified before.
675

676
    @return: the export information
677

678
    """
679
    assert self.op.mode == constants.INSTANCE_IMPORT
680

    
681
    src_node = self.op.src_node
682
    src_path = self.op.src_path
683

    
684
    if src_node is None:
685
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
686
      exp_list = self.rpc.call_export_list(locked_nodes)
687
      found = False
688
      for node in exp_list:
689
        if exp_list[node].fail_msg:
690
          continue
691
        if src_path in exp_list[node].payload:
692
          found = True
693
          self.op.src_node = src_node = node
694
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
695
                                                       src_path)
696
          break
697
      if not found:
698
        raise errors.OpPrereqError("No export found for relative path %s" %
699
                                   src_path, errors.ECODE_INVAL)
700

    
701
    CheckNodeOnline(self, src_node)
702
    result = self.rpc.call_export_info(src_node, src_path)
703
    result.Raise("No export or invalid export found in dir %s" % src_path)
704

    
705
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
706
    if not export_info.has_section(constants.INISECT_EXP):
707
      raise errors.ProgrammerError("Corrupted export config",
708
                                   errors.ECODE_ENVIRON)
709

    
710
    ei_version = export_info.get(constants.INISECT_EXP, "version")
711
    if (int(ei_version) != constants.EXPORT_VERSION):
712
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
713
                                 (ei_version, constants.EXPORT_VERSION),
714
                                 errors.ECODE_ENVIRON)
715
    return export_info
716

    
717
  def _ReadExportParams(self, einfo):
718
    """Use export parameters as defaults.
719

720
    In case the opcode doesn't specify (as in override) some instance
721
    parameters, then try to use them from the export information, if
722
    that declares them.
723

724
    """
725
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
726

    
727
    if self.op.disk_template is None:
728
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
729
        self.op.disk_template = einfo.get(constants.INISECT_INS,
730
                                          "disk_template")
731
        if self.op.disk_template not in constants.DISK_TEMPLATES:
732
          raise errors.OpPrereqError("Disk template specified in configuration"
733
                                     " file is not one of the allowed values:"
734
                                     " %s" %
735
                                     " ".join(constants.DISK_TEMPLATES),
736
                                     errors.ECODE_INVAL)
737
      else:
738
        raise errors.OpPrereqError("No disk template specified and the export"
739
                                   " is missing the disk_template information",
740
                                   errors.ECODE_INVAL)
741

    
742
    if not self.op.disks:
743
      disks = []
744
      # TODO: import the disk iv_name too
745
      for idx in range(constants.MAX_DISKS):
746
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
747
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
748
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
749
          disk = {
750
            constants.IDISK_SIZE: disk_sz,
751
            constants.IDISK_NAME: disk_name
752
            }
753
          disks.append(disk)
754
      self.op.disks = disks
755
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
756
        raise errors.OpPrereqError("No disk info specified and the export"
757
                                   " is missing the disk information",
758
                                   errors.ECODE_INVAL)
759

    
760
    if not self.op.nics:
761
      nics = []
762
      for idx in range(constants.MAX_NICS):
763
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
764
          ndict = {}
765
          for name in [constants.INIC_IP,
766
                       constants.INIC_MAC, constants.INIC_NAME]:
767
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
768
            ndict[name] = v
769
          network = einfo.get(constants.INISECT_INS,
770
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
771
          # in case network is given link and mode are inherited
772
          # from nodegroup's netparams and thus should not be passed here
773
          if network:
774
            ndict[constants.INIC_NETWORK] = network
775
          else:
776
            for name in list(constants.NICS_PARAMETERS):
777
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
778
              ndict[name] = v
779
          nics.append(ndict)
780
        else:
781
          break
782
      self.op.nics = nics
783

    
784
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
785
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
786

    
787
    if (self.op.hypervisor is None and
788
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
789
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
790

    
791
    if einfo.has_section(constants.INISECT_HYP):
792
      # use the export parameters but do not override the ones
793
      # specified by the user
794
      for name, value in einfo.items(constants.INISECT_HYP):
795
        if name not in self.op.hvparams:
796
          self.op.hvparams[name] = value
797

    
798
    if einfo.has_section(constants.INISECT_BEP):
799
      # use the parameters, without overriding
800
      for name, value in einfo.items(constants.INISECT_BEP):
801
        if name not in self.op.beparams:
802
          self.op.beparams[name] = value
803
        # Compatibility for the old "memory" be param
804
        if name == constants.BE_MEMORY:
805
          if constants.BE_MAXMEM not in self.op.beparams:
806
            self.op.beparams[constants.BE_MAXMEM] = value
807
          if constants.BE_MINMEM not in self.op.beparams:
808
            self.op.beparams[constants.BE_MINMEM] = value
809
    else:
810
      # try to read the parameters old style, from the main section
811
      for name in constants.BES_PARAMETERS:
812
        if (name not in self.op.beparams and
813
            einfo.has_option(constants.INISECT_INS, name)):
814
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
815

    
816
    if einfo.has_section(constants.INISECT_OSP):
817
      # use the parameters, without overriding
818
      for name, value in einfo.items(constants.INISECT_OSP):
819
        if name not in self.op.osparams:
820
          self.op.osparams[name] = value
821

    
822
  def _RevertToDefaults(self, cluster):
823
    """Revert the instance parameters to the default values.
824

825
    """
826
    # hvparams
827
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
828
    for name in self.op.hvparams.keys():
829
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
830
        del self.op.hvparams[name]
831
    # beparams
832
    be_defs = cluster.SimpleFillBE({})
833
    for name in self.op.beparams.keys():
834
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
835
        del self.op.beparams[name]
836
    # nic params
837
    nic_defs = cluster.SimpleFillNIC({})
838
    for nic in self.op.nics:
839
      for name in constants.NICS_PARAMETERS:
840
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
841
          del nic[name]
842
    # osparams
843
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
844
    for name in self.op.osparams.keys():
845
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
846
        del self.op.osparams[name]
847

    
848
  def _CalculateFileStorageDir(self):
849
    """Calculate final instance file storage dir.
850

851
    """
852
    # file storage dir calculation/check
853
    self.instance_file_storage_dir = None
854
    if self.op.disk_template in constants.DTS_FILEBASED:
855
      # build the full file storage dir path
856
      joinargs = []
857

    
858
      if self.op.disk_template == constants.DT_SHARED_FILE:
859
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
860
      else:
861
        get_fsd_fn = self.cfg.GetFileStorageDir
862

    
863
      cfg_storagedir = get_fsd_fn()
864
      if not cfg_storagedir:
865
        raise errors.OpPrereqError("Cluster file storage dir not defined",
866
                                   errors.ECODE_STATE)
867
      joinargs.append(cfg_storagedir)
868

    
869
      if self.op.file_storage_dir is not None:
870
        joinargs.append(self.op.file_storage_dir)
871

    
872
      joinargs.append(self.op.instance_name)
873

    
874
      # pylint: disable=W0142
875
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
876

    
877
  def CheckPrereq(self): # pylint: disable=R0914
878
    """Check prerequisites.
879

880
    """
881
    # Check that the optimistically acquired groups are correct wrt the
882
    # acquired nodes
883
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
884
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
885
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
886
    if not owned_groups.issuperset(cur_groups):
887
      raise errors.OpPrereqError("New instance %s's node groups changed since"
888
                                 " locks were acquired, current groups are"
889
                                 " are '%s', owning groups '%s'; retry the"
890
                                 " operation" %
891
                                 (self.op.instance_name,
892
                                  utils.CommaJoin(cur_groups),
893
                                  utils.CommaJoin(owned_groups)),
894
                                 errors.ECODE_STATE)
895

    
896
    self._CalculateFileStorageDir()
897

    
898
    if self.op.mode == constants.INSTANCE_IMPORT:
899
      export_info = self._ReadExportInfo()
900
      self._ReadExportParams(export_info)
901
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
902
    else:
903
      self._old_instance_name = None
904

    
905
    if (not self.cfg.GetVGName() and
906
        self.op.disk_template not in constants.DTS_NOT_LVM):
907
      raise errors.OpPrereqError("Cluster does not support lvm-based"
908
                                 " instances", errors.ECODE_STATE)
909

    
910
    if (self.op.hypervisor is None or
911
        self.op.hypervisor == constants.VALUE_AUTO):
912
      self.op.hypervisor = self.cfg.GetHypervisorType()
913

    
914
    cluster = self.cfg.GetClusterInfo()
915
    enabled_hvs = cluster.enabled_hypervisors
916
    if self.op.hypervisor not in enabled_hvs:
917
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
918
                                 " cluster (%s)" %
919
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
920
                                 errors.ECODE_STATE)
921

    
922
    # Check tag validity
923
    for tag in self.op.tags:
924
      objects.TaggableObject.ValidateTag(tag)
925

    
926
    # check hypervisor parameter syntax (locally)
927
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
928
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
929
                                      self.op.hvparams)
930
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
931
    hv_type.CheckParameterSyntax(filled_hvp)
932
    self.hv_full = filled_hvp
933
    # check that we don't specify global parameters on an instance
934
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
935
                         "instance", "cluster")
936

    
937
    # fill and remember the beparams dict
938
    self.be_full = _ComputeFullBeParams(self.op, cluster)
939

    
940
    # build os parameters
941
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
942

    
943
    # now that hvp/bep are in final format, let's reset to defaults,
944
    # if told to do so
945
    if self.op.identify_defaults:
946
      self._RevertToDefaults(cluster)
947

    
948
    # NIC buildup
949
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
950
                             self.proc.GetECId())
951

    
952
    # disk checks/pre-build
953
    default_vg = self.cfg.GetVGName()
954
    self.disks = ComputeDisks(self.op, default_vg)
955

    
956
    if self.op.mode == constants.INSTANCE_IMPORT:
957
      disk_images = []
958
      for idx in range(len(self.disks)):
959
        option = "disk%d_dump" % idx
960
        if export_info.has_option(constants.INISECT_INS, option):
961
          # FIXME: are the old os-es, disk sizes, etc. useful?
962
          export_name = export_info.get(constants.INISECT_INS, option)
963
          image = utils.PathJoin(self.op.src_path, export_name)
964
          disk_images.append(image)
965
        else:
966
          disk_images.append(False)
967

    
968
      self.src_images = disk_images
969

    
970
      if self.op.instance_name == self._old_instance_name:
971
        for idx, nic in enumerate(self.nics):
972
          if nic.mac == constants.VALUE_AUTO:
973
            nic_mac_ini = "nic%d_mac" % idx
974
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
975

    
976
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
977

    
978
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
979
    if self.op.ip_check:
980
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
981
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
982
                                   (self.check_ip, self.op.instance_name),
983
                                   errors.ECODE_NOTUNIQUE)
984

    
985
    #### mac address generation
986
    # By generating here the mac address both the allocator and the hooks get
987
    # the real final mac address rather than the 'auto' or 'generate' value.
988
    # There is a race condition between the generation and the instance object
989
    # creation, which means that we know the mac is valid now, but we're not
990
    # sure it will be when we actually add the instance. If things go bad
991
    # adding the instance will abort because of a duplicate mac, and the
992
    # creation job will fail.
993
    for nic in self.nics:
994
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
995
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
996

    
997
    #### allocator run
998

    
999
    if self.op.iallocator is not None:
1000
      self._RunAllocator()
1001

    
1002
    # Release all unneeded node locks
1003
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
1004
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
1005
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
1006
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
1007
    # Release all unneeded group locks
1008
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
1009
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
1010

    
1011
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1012
            self.owned_locks(locking.LEVEL_NODE_RES)), \
1013
      "Node locks differ from node resource locks"
1014

    
1015
    #### node related checks
1016

    
1017
    # check primary node
1018
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
1019
    assert self.pnode is not None, \
1020
      "Cannot retrieve locked node %s" % self.op.pnode
1021
    if pnode.offline:
1022
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1023
                                 pnode.name, errors.ECODE_STATE)
1024
    if pnode.drained:
1025
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1026
                                 pnode.name, errors.ECODE_STATE)
1027
    if not pnode.vm_capable:
1028
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1029
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
1030

    
1031
    self.secondaries = []
1032

    
1033
    # Fill in any IPs from IP pools. This must happen here, because we need to
1034
    # know the nic's primary node, as specified by the iallocator
1035
    for idx, nic in enumerate(self.nics):
1036
      net_uuid = nic.network
1037
      if net_uuid is not None:
1038
        nobj = self.cfg.GetNetwork(net_uuid)
1039
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
1040
        if netparams is None:
1041
          raise errors.OpPrereqError("No netparams found for network"
1042
                                     " %s. Propably not connected to"
1043
                                     " node's %s nodegroup" %
1044
                                     (nobj.name, self.pnode.name),
1045
                                     errors.ECODE_INVAL)
1046
        self.LogInfo("NIC/%d inherits netparams %s" %
1047
                     (idx, netparams.values()))
1048
        nic.nicparams = dict(netparams)
1049
        if nic.ip is not None:
1050
          if nic.ip.lower() == constants.NIC_IP_POOL:
1051
            try:
1052
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1053
            except errors.ReservationError:
1054
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1055
                                         " from the address pool" % idx,
1056
                                         errors.ECODE_STATE)
1057
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1058
          else:
1059
            try:
1060
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
1061
                                 check=self.op.conflicts_check)
1062
            except errors.ReservationError:
1063
              raise errors.OpPrereqError("IP address %s already in use"
1064
                                         " or does not belong to network %s" %
1065
                                         (nic.ip, nobj.name),
1066
                                         errors.ECODE_NOTUNIQUE)
1067

    
1068
      # net is None, ip None or given
1069
      elif self.op.conflicts_check:
1070
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1071

    
1072
    # mirror node verification
1073
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1074
      if self.op.snode == pnode.name:
1075
        raise errors.OpPrereqError("The secondary node cannot be the"
1076
                                   " primary node", errors.ECODE_INVAL)
1077
      CheckNodeOnline(self, self.op.snode)
1078
      CheckNodeNotDrained(self, self.op.snode)
1079
      CheckNodeVmCapable(self, self.op.snode)
1080
      self.secondaries.append(self.op.snode)
1081

    
1082
      snode = self.cfg.GetNodeInfo(self.op.snode)
1083
      if pnode.group != snode.group:
1084
        self.LogWarning("The primary and secondary nodes are in two"
1085
                        " different node groups; the disk parameters"
1086
                        " from the first disk's node group will be"
1087
                        " used")
1088

    
1089
    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1090
      nodes = [pnode]
1091
      if self.op.disk_template in constants.DTS_INT_MIRROR:
1092
        nodes.append(snode)
1093
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1094
      if compat.any(map(has_es, nodes)):
1095
        raise errors.OpPrereqError("Disk template %s not supported with"
1096
                                   " exclusive storage" % self.op.disk_template,
1097
                                   errors.ECODE_STATE)
1098

    
1099
    nodenames = [pnode.name] + self.secondaries
1100

    
1101
    if not self.adopt_disks:
1102
      if self.op.disk_template == constants.DT_RBD:
1103
        # _CheckRADOSFreeSpace() is just a placeholder.
1104
        # Any function that checks prerequisites can be placed here.
1105
        # Check if there is enough space on the RADOS cluster.
1106
        CheckRADOSFreeSpace()
1107
      elif self.op.disk_template == constants.DT_EXT:
1108
        # FIXME: Function that checks prereqs if needed
1109
        pass
1110
      else:
1111
        # Check lv size requirements, if not adopting
1112
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1113
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1114

    
1115
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1116
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1117
                                disk[constants.IDISK_ADOPT])
1118
                     for disk in self.disks])
1119
      if len(all_lvs) != len(self.disks):
1120
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1121
                                   errors.ECODE_INVAL)
1122
      for lv_name in all_lvs:
1123
        try:
1124
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1125
          # to ReserveLV uses the same syntax
1126
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1127
        except errors.ReservationError:
1128
          raise errors.OpPrereqError("LV named %s used by another instance" %
1129
                                     lv_name, errors.ECODE_NOTUNIQUE)
1130

    
1131
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1132
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1133

    
1134
      node_lvs = self.rpc.call_lv_list([pnode.name],
1135
                                       vg_names.payload.keys())[pnode.name]
1136
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1137
      node_lvs = node_lvs.payload
1138

    
1139
      delta = all_lvs.difference(node_lvs.keys())
1140
      if delta:
1141
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1142
                                   utils.CommaJoin(delta),
1143
                                   errors.ECODE_INVAL)
1144
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1145
      if online_lvs:
1146
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1147
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1148
                                   errors.ECODE_STATE)
1149
      # update the size of disk based on what is found
1150
      for dsk in self.disks:
1151
        dsk[constants.IDISK_SIZE] = \
1152
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1153
                                        dsk[constants.IDISK_ADOPT])][0]))
1154

    
1155
    elif self.op.disk_template == constants.DT_BLOCK:
1156
      # Normalize and de-duplicate device paths
1157
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1158
                       for disk in self.disks])
1159
      if len(all_disks) != len(self.disks):
1160
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1161
                                   errors.ECODE_INVAL)
1162
      baddisks = [d for d in all_disks
1163
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1164
      if baddisks:
1165
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1166
                                   " cannot be adopted" %
1167
                                   (utils.CommaJoin(baddisks),
1168
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1169
                                   errors.ECODE_INVAL)
1170

    
1171
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
1172
                                            list(all_disks))[pnode.name]
1173
      node_disks.Raise("Cannot get block device information from node %s" %
1174
                       pnode.name)
1175
      node_disks = node_disks.payload
1176
      delta = all_disks.difference(node_disks.keys())
1177
      if delta:
1178
        raise errors.OpPrereqError("Missing block device(s): %s" %
1179
                                   utils.CommaJoin(delta),
1180
                                   errors.ECODE_INVAL)
1181
      for dsk in self.disks:
1182
        dsk[constants.IDISK_SIZE] = \
1183
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1184

    
1185
    # Verify instance specs
1186
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1187
    ispec = {
1188
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1189
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1190
      constants.ISPEC_DISK_COUNT: len(self.disks),
1191
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1192
                                  for disk in self.disks],
1193
      constants.ISPEC_NIC_COUNT: len(self.nics),
1194
      constants.ISPEC_SPINDLE_USE: spindle_use,
1195
      }
1196

    
1197
    group_info = self.cfg.GetNodeGroup(pnode.group)
1198
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1199
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1200
                                               self.op.disk_template)
1201
    if not self.op.ignore_ipolicy and res:
1202
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1203
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1204
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1205

    
1206
    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1207

    
1208
    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1209
    # check OS parameters (remotely)
1210
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1211

    
1212
    CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1213

    
1214
    #TODO: _CheckExtParams (remotely)
1215
    # Check parameters for extstorage
1216

    
1217
    # memory check on primary node
1218
    #TODO(dynmem): use MINMEM for checking
1219
    if self.op.start:
1220
      CheckNodeFreeMemory(self, self.pnode.name,
1221
                          "creating instance %s" % self.op.instance_name,
1222
                          self.be_full[constants.BE_MAXMEM],
1223
                          self.op.hypervisor)
1224

    
1225
    self.dry_run_result = list(nodenames)
1226

    
1227
  def Exec(self, feedback_fn):
1228
    """Create and add the instance to the cluster.
1229

1230
    """
1231
    instance = self.op.instance_name
1232
    pnode_name = self.pnode.name
1233

    
1234
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1235
                self.owned_locks(locking.LEVEL_NODE)), \
1236
      "Node locks differ from node resource locks"
1237
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1238

    
1239
    ht_kind = self.op.hypervisor
1240
    if ht_kind in constants.HTS_REQ_PORT:
1241
      network_port = self.cfg.AllocatePort()
1242
    else:
1243
      network_port = None
1244

    
1245
    # This is ugly, but we have a chicken-and-egg problem here:
1246
    # We can only take the group disk parameters, as the instance
1247
    # has no disks yet (we are generating them right here).
1248
    node = self.cfg.GetNodeInfo(pnode_name)
1249
    nodegroup = self.cfg.GetNodeGroup(node.group)
1250
    disks = GenerateDiskTemplate(self,
1251
                                 self.op.disk_template,
1252
                                 instance, pnode_name,
1253
                                 self.secondaries,
1254
                                 self.disks,
1255
                                 self.instance_file_storage_dir,
1256
                                 self.op.file_driver,
1257
                                 0,
1258
                                 feedback_fn,
1259
                                 self.cfg.GetGroupDiskParams(nodegroup))
1260

    
1261
    iobj = objects.Instance(name=instance, os=self.op.os_type,
1262
                            primary_node=pnode_name,
1263
                            nics=self.nics, disks=disks,
1264
                            disk_template=self.op.disk_template,
1265
                            disks_active=False,
1266
                            admin_state=constants.ADMINST_DOWN,
1267
                            network_port=network_port,
1268
                            beparams=self.op.beparams,
1269
                            hvparams=self.op.hvparams,
1270
                            hypervisor=self.op.hypervisor,
1271
                            osparams=self.op.osparams,
1272
                            )
1273

    
1274
    if self.op.tags:
1275
      for tag in self.op.tags:
1276
        iobj.AddTag(tag)
1277

    
1278
    if self.adopt_disks:
1279
      if self.op.disk_template == constants.DT_PLAIN:
1280
        # rename LVs to the newly-generated names; we need to construct
1281
        # 'fake' LV disks with the old data, plus the new unique_id
1282
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1283
        rename_to = []
1284
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1285
          rename_to.append(t_dsk.logical_id)
1286
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1287
          self.cfg.SetDiskID(t_dsk, pnode_name)
1288
        result = self.rpc.call_blockdev_rename(pnode_name,
1289
                                               zip(tmp_disks, rename_to))
1290
        result.Raise("Failed to rename adoped LVs")
1291
    else:
1292
      feedback_fn("* creating instance disks...")
1293
      try:
1294
        CreateDisks(self, iobj)
1295
      except errors.OpExecError:
1296
        self.LogWarning("Device creation failed")
1297
        self.cfg.ReleaseDRBDMinors(instance)
1298
        raise
1299

    
1300
    feedback_fn("adding instance %s to cluster config" % instance)
1301

    
1302
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1303

    
1304
    # Declare that we don't want to remove the instance lock anymore, as we've
1305
    # added the instance to the config
1306
    del self.remove_locks[locking.LEVEL_INSTANCE]
1307

    
1308
    if self.op.mode == constants.INSTANCE_IMPORT:
1309
      # Release unused nodes
1310
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1311
    else:
1312
      # Release all nodes
1313
      ReleaseLocks(self, locking.LEVEL_NODE)
1314

    
1315
    disk_abort = False
1316
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1317
      feedback_fn("* wiping instance disks...")
1318
      try:
1319
        WipeDisks(self, iobj)
1320
      except errors.OpExecError, err:
1321
        logging.exception("Wiping disks failed")
1322
        self.LogWarning("Wiping instance disks failed (%s)", err)
1323
        disk_abort = True
1324

    
1325
    if disk_abort:
1326
      # Something is already wrong with the disks, don't do anything else
1327
      pass
1328
    elif self.op.wait_for_sync:
1329
      disk_abort = not WaitForSync(self, iobj)
1330
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1331
      # make sure the disks are not degraded (still sync-ing is ok)
1332
      feedback_fn("* checking mirrors status")
1333
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1334
    else:
1335
      disk_abort = False
1336

    
1337
    if disk_abort:
1338
      RemoveDisks(self, iobj)
1339
      self.cfg.RemoveInstance(iobj.name)
1340
      # Make sure the instance lock gets removed
1341
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1342
      raise errors.OpExecError("There are some degraded disks for"
1343
                               " this instance")
1344

    
1345
    # instance disks are now active
1346
    iobj.disks_active = True
1347

    
1348
    # Release all node resource locks
1349
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1350

    
1351
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1352
      # we need to set the disks ID to the primary node, since the
1353
      # preceding code might or might not have done it, depending on
1354
      # disk template and other options
1355
      for disk in iobj.disks:
1356
        self.cfg.SetDiskID(disk, pnode_name)
1357
      if self.op.mode == constants.INSTANCE_CREATE:
1358
        if not self.op.no_install:
1359
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1360
                        not self.op.wait_for_sync)
1361
          if pause_sync:
1362
            feedback_fn("* pausing disk sync to install instance OS")
1363
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1364
                                                              (iobj.disks,
1365
                                                               iobj), True)
1366
            for idx, success in enumerate(result.payload):
1367
              if not success:
1368
                logging.warn("pause-sync of instance %s for disk %d failed",
1369
                             instance, idx)
1370

    
1371
          feedback_fn("* running the instance OS create scripts...")
1372
          # FIXME: pass debug option from opcode to backend
1373
          os_add_result = \
1374
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1375
                                          self.op.debug_level)
1376
          if pause_sync:
1377
            feedback_fn("* resuming disk sync")
1378
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1379
                                                              (iobj.disks,
1380
                                                               iobj), False)
1381
            for idx, success in enumerate(result.payload):
1382
              if not success:
1383
                logging.warn("resume-sync of instance %s for disk %d failed",
1384
                             instance, idx)
1385

    
1386
          os_add_result.Raise("Could not add os for instance %s"
1387
                              " on node %s" % (instance, pnode_name))
1388

    
1389
      else:
1390
        if self.op.mode == constants.INSTANCE_IMPORT:
1391
          feedback_fn("* running the instance OS import scripts...")
1392

    
1393
          transfers = []
1394

    
1395
          for idx, image in enumerate(self.src_images):
1396
            if not image:
1397
              continue
1398

    
1399
            # FIXME: pass debug option from opcode to backend
1400
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1401
                                               constants.IEIO_FILE, (image, ),
1402
                                               constants.IEIO_SCRIPT,
1403
                                               (iobj.disks[idx], idx),
1404
                                               None)
1405
            transfers.append(dt)
1406

    
1407
          import_result = \
1408
            masterd.instance.TransferInstanceData(self, feedback_fn,
1409
                                                  self.op.src_node, pnode_name,
1410
                                                  self.pnode.secondary_ip,
1411
                                                  iobj, transfers)
1412
          if not compat.all(import_result):
1413
            self.LogWarning("Some disks for instance %s on node %s were not"
1414
                            " imported successfully" % (instance, pnode_name))
1415

    
1416
          rename_from = self._old_instance_name
1417

    
1418
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1419
          feedback_fn("* preparing remote import...")
1420
          # The source cluster will stop the instance before attempting to make
1421
          # a connection. In some cases stopping an instance can take a long
1422
          # time, hence the shutdown timeout is added to the connection
1423
          # timeout.
1424
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1425
                             self.op.source_shutdown_timeout)
1426
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1427

    
1428
          assert iobj.primary_node == self.pnode.name
1429
          disk_results = \
1430
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1431
                                          self.source_x509_ca,
1432
                                          self._cds, timeouts)
1433
          if not compat.all(disk_results):
1434
            # TODO: Should the instance still be started, even if some disks
1435
            # failed to import (valid for local imports, too)?
1436
            self.LogWarning("Some disks for instance %s on node %s were not"
1437
                            " imported successfully" % (instance, pnode_name))
1438

    
1439
          rename_from = self.source_instance_name
1440

    
1441
        else:
1442
          # also checked in the prereq part
1443
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1444
                                       % self.op.mode)
1445

    
1446
        # Run rename script on newly imported instance
1447
        assert iobj.name == instance
1448
        feedback_fn("Running rename script for %s" % instance)
1449
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1450
                                                   rename_from,
1451
                                                   self.op.debug_level)
1452
        if result.fail_msg:
1453
          self.LogWarning("Failed to run rename script for %s on node"
1454
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
1455

    
1456
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1457

    
1458
    if self.op.start:
1459
      iobj.admin_state = constants.ADMINST_UP
1460
      self.cfg.Update(iobj, feedback_fn)
1461
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1462
      feedback_fn("* starting instance...")
1463
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1464
                                            False, self.op.reason)
1465
      result.Raise("Could not start instance")
1466

    
1467
    return list(iobj.all_nodes)
1468

    
1469

    
1470
class LUInstanceRename(LogicalUnit):
1471
  """Rename an instance.
1472

1473
  """
1474
  HPATH = "instance-rename"
1475
  HTYPE = constants.HTYPE_INSTANCE
1476

    
1477
  def CheckArguments(self):
1478
    """Check arguments.
1479

1480
    """
1481
    if self.op.ip_check and not self.op.name_check:
1482
      # TODO: make the ip check more flexible and not depend on the name check
1483
      raise errors.OpPrereqError("IP address check requires a name check",
1484
                                 errors.ECODE_INVAL)
1485

    
1486
  def BuildHooksEnv(self):
1487
    """Build hooks env.
1488

1489
    This runs on master, primary and secondary nodes of the instance.
1490

1491
    """
1492
    env = BuildInstanceHookEnvByObject(self, self.instance)
1493
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1494
    return env
1495

    
1496
  def BuildHooksNodes(self):
1497
    """Build hooks nodes.
1498

1499
    """
1500
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1501
    return (nl, nl)
1502

    
1503
  def CheckPrereq(self):
1504
    """Check prerequisites.
1505

1506
    This checks that the instance is in the cluster and is not running.
1507

1508
    """
1509
    self.op.instance_name = ExpandInstanceName(self.cfg,
1510
                                               self.op.instance_name)
1511
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1512
    assert instance is not None
1513
    CheckNodeOnline(self, instance.primary_node)
1514
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1515
                       msg="cannot rename")
1516
    self.instance = instance
1517

    
1518
    new_name = self.op.new_name
1519
    if self.op.name_check:
1520
      hostname = _CheckHostnameSane(self, new_name)
1521
      new_name = self.op.new_name = hostname.name
1522
      if (self.op.ip_check and
1523
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1524
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1525
                                   (hostname.ip, new_name),
1526
                                   errors.ECODE_NOTUNIQUE)
1527

    
1528
    instance_list = self.cfg.GetInstanceList()
1529
    if new_name in instance_list and new_name != instance.name:
1530
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1531
                                 new_name, errors.ECODE_EXISTS)
1532

    
1533
  def Exec(self, feedback_fn):
1534
    """Rename the instance.
1535

1536
    """
1537
    inst = self.instance
1538
    old_name = inst.name
1539

    
1540
    rename_file_storage = False
1541
    if (inst.disk_template in constants.DTS_FILEBASED and
1542
        self.op.new_name != inst.name):
1543
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1544
      rename_file_storage = True
1545

    
1546
    self.cfg.RenameInstance(inst.name, self.op.new_name)
1547
    # Change the instance lock. This is definitely safe while we hold the BGL.
1548
    # Otherwise the new lock would have to be added in acquired mode.
1549
    assert self.REQ_BGL
1550
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1551
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1552
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1553

    
1554
    # re-read the instance from the configuration after rename
1555
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
1556

    
1557
    if rename_file_storage:
1558
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1559
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1560
                                                     old_file_storage_dir,
1561
                                                     new_file_storage_dir)
1562
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1563
                   " (but the instance has been renamed in Ganeti)" %
1564
                   (inst.primary_node, old_file_storage_dir,
1565
                    new_file_storage_dir))
1566

    
1567
    StartInstanceDisks(self, inst, None)
1568
    # update info on disks
1569
    info = GetInstanceInfoText(inst)
1570
    for (idx, disk) in enumerate(inst.disks):
1571
      for node in inst.all_nodes:
1572
        self.cfg.SetDiskID(disk, node)
1573
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
1574
        if result.fail_msg:
1575
          self.LogWarning("Error setting info on node %s for disk %s: %s",
1576
                          node, idx, result.fail_msg)
1577
    try:
1578
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1579
                                                 old_name, self.op.debug_level)
1580
      msg = result.fail_msg
1581
      if msg:
1582
        msg = ("Could not run OS rename script for instance %s on node %s"
1583
               " (but the instance has been renamed in Ganeti): %s" %
1584
               (inst.name, inst.primary_node, msg))
1585
        self.LogWarning(msg)
1586
    finally:
1587
      ShutdownInstanceDisks(self, inst)
1588

    
1589
    return inst.name
1590

    
1591

    
1592
class LUInstanceRemove(LogicalUnit):
1593
  """Remove an instance.
1594

1595
  """
1596
  HPATH = "instance-remove"
1597
  HTYPE = constants.HTYPE_INSTANCE
1598
  REQ_BGL = False
1599

    
1600
  def ExpandNames(self):
1601
    self._ExpandAndLockInstance()
1602
    self.needed_locks[locking.LEVEL_NODE] = []
1603
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1604
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1605

    
1606
  def DeclareLocks(self, level):
1607
    if level == locking.LEVEL_NODE:
1608
      self._LockInstancesNodes()
1609
    elif level == locking.LEVEL_NODE_RES:
1610
      # Copy node locks
1611
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1612
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1613

    
1614
  def BuildHooksEnv(self):
1615
    """Build hooks env.
1616

1617
    This runs on master, primary and secondary nodes of the instance.
1618

1619
    """
1620
    env = BuildInstanceHookEnvByObject(self, self.instance)
1621
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1622
    return env
1623

    
1624
  def BuildHooksNodes(self):
1625
    """Build hooks nodes.
1626

1627
    """
1628
    nl = [self.cfg.GetMasterNode()]
1629
    nl_post = list(self.instance.all_nodes) + nl
1630
    return (nl, nl_post)
1631

    
1632
  def CheckPrereq(self):
1633
    """Check prerequisites.
1634

1635
    This checks that the instance is in the cluster.
1636

1637
    """
1638
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1639
    assert self.instance is not None, \
1640
      "Cannot retrieve locked instance %s" % self.op.instance_name
1641

    
1642
  def Exec(self, feedback_fn):
1643
    """Remove the instance.
1644

1645
    """
1646
    instance = self.instance
1647
    logging.info("Shutting down instance %s on node %s",
1648
                 instance.name, instance.primary_node)
1649

    
1650
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1651
                                             self.op.shutdown_timeout,
1652
                                             self.op.reason)
1653
    msg = result.fail_msg
1654
    if msg:
1655
      if self.op.ignore_failures:
1656
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
1657
      else:
1658
        raise errors.OpExecError("Could not shutdown instance %s on"
1659
                                 " node %s: %s" %
1660
                                 (instance.name, instance.primary_node, msg))
1661

    
1662
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1663
            self.owned_locks(locking.LEVEL_NODE_RES))
1664
    assert not (set(instance.all_nodes) -
1665
                self.owned_locks(locking.LEVEL_NODE)), \
1666
      "Not owning correct locks"
1667

    
1668
    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1669

    
1670

    
1671
class LUInstanceMove(LogicalUnit):
1672
  """Move an instance by data-copying.
1673

1674
  """
1675
  HPATH = "instance-move"
1676
  HTYPE = constants.HTYPE_INSTANCE
1677
  REQ_BGL = False
1678

    
1679
  def ExpandNames(self):
1680
    self._ExpandAndLockInstance()
1681
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
1682
    self.op.target_node = target_node
1683
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
1684
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1685
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1686

    
1687
  def DeclareLocks(self, level):
1688
    if level == locking.LEVEL_NODE:
1689
      self._LockInstancesNodes(primary_only=True)
1690
    elif level == locking.LEVEL_NODE_RES:
1691
      # Copy node locks
1692
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1693
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1694

    
1695
  def BuildHooksEnv(self):
1696
    """Build hooks env.
1697

1698
    This runs on master, primary and secondary nodes of the instance.
1699

1700
    """
1701
    env = {
1702
      "TARGET_NODE": self.op.target_node,
1703
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1704
      }
1705
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1706
    return env
1707

    
1708
  def BuildHooksNodes(self):
1709
    """Build hooks nodes.
1710

1711
    """
1712
    nl = [
1713
      self.cfg.GetMasterNode(),
1714
      self.instance.primary_node,
1715
      self.op.target_node,
1716
      ]
1717
    return (nl, nl)
1718

    
1719
  def CheckPrereq(self):
1720
    """Check prerequisites.
1721

1722
    This checks that the instance is in the cluster.
1723

1724
    """
1725
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1726
    assert self.instance is not None, \
1727
      "Cannot retrieve locked instance %s" % self.op.instance_name
1728

    
1729
    if instance.disk_template not in constants.DTS_COPYABLE:
1730
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1731
                                 instance.disk_template, errors.ECODE_STATE)
1732

    
1733
    node = self.cfg.GetNodeInfo(self.op.target_node)
1734
    assert node is not None, \
1735
      "Cannot retrieve locked node %s" % self.op.target_node
1736

    
1737
    self.target_node = target_node = node.name
1738

    
1739
    if target_node == instance.primary_node:
1740
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1741
                                 (instance.name, target_node),
1742
                                 errors.ECODE_STATE)
1743

    
1744
    bep = self.cfg.GetClusterInfo().FillBE(instance)
1745

    
1746
    for idx, dsk in enumerate(instance.disks):
1747
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1748
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1749
                                   " cannot copy" % idx, errors.ECODE_STATE)
1750

    
1751
    CheckNodeOnline(self, target_node)
1752
    CheckNodeNotDrained(self, target_node)
1753
    CheckNodeVmCapable(self, target_node)
1754
    cluster = self.cfg.GetClusterInfo()
1755
    group_info = self.cfg.GetNodeGroup(node.group)
1756
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1757
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1758
                           ignore=self.op.ignore_ipolicy)
1759

    
1760
    if instance.admin_state == constants.ADMINST_UP:
1761
      # check memory requirements on the target node
1762
      CheckNodeFreeMemory(self, target_node,
1763
                          "failing over instance %s" %
1764
                          instance.name, bep[constants.BE_MAXMEM],
1765
                          instance.hypervisor)
1766
    else:
1767
      self.LogInfo("Not checking memory on the secondary node as"
1768
                   " instance will not be started")
1769

    
1770
    # check bridge existence
1771
    CheckInstanceBridgesExist(self, instance, node=target_node)
1772

    
1773
  def Exec(self, feedback_fn):
1774
    """Move an instance.
1775

1776
    The move is done by shutting it down on its present node, copying
1777
    the data over (slow) and starting it on the new node.
1778

1779
    """
1780
    instance = self.instance
1781

    
1782
    source_node = instance.primary_node
1783
    target_node = self.target_node
1784

    
1785
    self.LogInfo("Shutting down instance %s on source node %s",
1786
                 instance.name, source_node)
1787

    
1788
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1789
            self.owned_locks(locking.LEVEL_NODE_RES))
1790

    
1791
    result = self.rpc.call_instance_shutdown(source_node, instance,
1792
                                             self.op.shutdown_timeout,
1793
                                             self.op.reason)
1794
    msg = result.fail_msg
1795
    if msg:
1796
      if self.op.ignore_consistency:
1797
        self.LogWarning("Could not shutdown instance %s on node %s."
1798
                        " Proceeding anyway. Please make sure node"
1799
                        " %s is down. Error details: %s",
1800
                        instance.name, source_node, source_node, msg)
1801
      else:
1802
        raise errors.OpExecError("Could not shutdown instance %s on"
1803
                                 " node %s: %s" %
1804
                                 (instance.name, source_node, msg))
1805

    
1806
    # create the target disks
1807
    try:
1808
      CreateDisks(self, instance, target_node=target_node)
1809
    except errors.OpExecError:
1810
      self.LogWarning("Device creation failed")
1811
      self.cfg.ReleaseDRBDMinors(instance.name)
1812
      raise
1813

    
1814
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1815

    
1816
    errs = []
1817
    # activate, get path, copy the data over
1818
    for idx, disk in enumerate(instance.disks):
1819
      self.LogInfo("Copying data for disk %d", idx)
1820
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1821
                                               instance.name, True, idx)
1822
      if result.fail_msg:
1823
        self.LogWarning("Can't assemble newly created disk %d: %s",
1824
                        idx, result.fail_msg)
1825
        errs.append(result.fail_msg)
1826
        break
1827
      dev_path, _ = result.payload
1828
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1829
                                             target_node, dev_path,
1830
                                             cluster_name)
1831
      if result.fail_msg:
1832
        self.LogWarning("Can't copy data over for disk %d: %s",
1833
                        idx, result.fail_msg)
1834
        errs.append(result.fail_msg)
1835
        break
1836

    
1837
    if errs:
1838
      self.LogWarning("Some disks failed to copy, aborting")
1839
      try:
1840
        RemoveDisks(self, instance, target_node=target_node)
1841
      finally:
1842
        self.cfg.ReleaseDRBDMinors(instance.name)
1843
        raise errors.OpExecError("Errors during disk copy: %s" %
1844
                                 (",".join(errs),))
1845

    
1846
    instance.primary_node = target_node
1847
    self.cfg.Update(instance, feedback_fn)
1848

    
1849
    self.LogInfo("Removing the disks on the original node")
1850
    RemoveDisks(self, instance, target_node=source_node)
1851

    
1852
    # Only start the instance if it's marked as up
1853
    if instance.admin_state == constants.ADMINST_UP:
1854
      self.LogInfo("Starting instance %s on node %s",
1855
                   instance.name, target_node)
1856

    
1857
      disks_ok, _ = AssembleInstanceDisks(self, instance,
1858
                                          ignore_secondaries=True)
1859
      if not disks_ok:
1860
        ShutdownInstanceDisks(self, instance)
1861
        raise errors.OpExecError("Can't activate the instance's disks")
1862

    
1863
      result = self.rpc.call_instance_start(target_node,
1864
                                            (instance, None, None), False,
1865
                                            self.op.reason)
1866
      msg = result.fail_msg
1867
      if msg:
1868
        ShutdownInstanceDisks(self, instance)
1869
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1870
                                 (instance.name, target_node, msg))
1871

    
1872

    
1873
class LUInstanceMultiAlloc(NoHooksLU):
1874
  """Allocates multiple instances at the same time.
1875

1876
  """
1877
  REQ_BGL = False
1878

    
1879
  def CheckArguments(self):
1880
    """Check arguments.
1881

1882
    """
1883
    nodes = []
1884
    for inst in self.op.instances:
1885
      if inst.iallocator is not None:
1886
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1887
                                   " instance objects", errors.ECODE_INVAL)
1888
      nodes.append(bool(inst.pnode))
1889
      if inst.disk_template in constants.DTS_INT_MIRROR:
1890
        nodes.append(bool(inst.snode))
1891

    
1892
    has_nodes = compat.any(nodes)
1893
    if compat.all(nodes) ^ has_nodes:
1894
      raise errors.OpPrereqError("There are instance objects providing"
1895
                                 " pnode/snode while others do not",
1896
                                 errors.ECODE_INVAL)
1897

    
1898
    if not has_nodes and self.op.iallocator is None:
1899
      default_iallocator = self.cfg.GetDefaultIAllocator()
1900
      if default_iallocator:
1901
        self.op.iallocator = default_iallocator
1902
      else:
1903
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1904
                                   " given and no cluster-wide default"
1905
                                   " iallocator found; please specify either"
1906
                                   " an iallocator or nodes on the instances"
1907
                                   " or set a cluster-wide default iallocator",
1908
                                   errors.ECODE_INVAL)
1909

    
1910
    _CheckOpportunisticLocking(self.op)
1911

    
1912
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1913
    if dups:
1914
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1915
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1916

    
1917
  def ExpandNames(self):
1918
    """Calculate the locks.
1919

1920
    """
1921
    self.share_locks = ShareAll()
1922
    self.needed_locks = {
1923
      # iallocator will select nodes and even if no iallocator is used,
1924
      # collisions with LUInstanceCreate should be avoided
1925
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1926
      }
1927

    
1928
    if self.op.iallocator:
1929
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1930
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1931

    
1932
      if self.op.opportunistic_locking:
1933
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1934
    else:
1935
      nodeslist = []
1936
      for inst in self.op.instances:
1937
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1938
        nodeslist.append(inst.pnode)
1939
        if inst.snode is not None:
1940
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
1941
          nodeslist.append(inst.snode)
1942

    
1943
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1944
      # Lock resources of instance's primary and secondary nodes (copy to
1945
      # prevent accidental modification)
1946
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1947

    
1948
  def DeclareLocks(self, level):
1949
    if level == locking.LEVEL_NODE_RES and \
1950
      self.opportunistic_locks[locking.LEVEL_NODE]:
1951
      # Even when using opportunistic locking, we require the same set of
1952
      # NODE_RES locks as we got NODE locks
1953
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1954
        self.owned_locks(locking.LEVEL_NODE)
1955

    
1956
  def CheckPrereq(self):
1957
    """Check prerequisite.
1958

1959
    """
1960
    if self.op.iallocator:
1961
      cluster = self.cfg.GetClusterInfo()
1962
      default_vg = self.cfg.GetVGName()
1963
      ec_id = self.proc.GetECId()
1964

    
1965
      if self.op.opportunistic_locking:
1966
        # Only consider nodes for which a lock is held
1967
        node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1968
      else:
1969
        node_whitelist = None
1970

    
1971
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1972
                                           _ComputeNics(op, cluster, None,
1973
                                                        self.cfg, ec_id),
1974
                                           _ComputeFullBeParams(op, cluster),
1975
                                           node_whitelist)
1976
               for op in self.op.instances]
1977

    
1978
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1979
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1980

    
1981
      ial.Run(self.op.iallocator)
1982

    
1983
      if not ial.success:
1984
        raise errors.OpPrereqError("Can't compute nodes using"
1985
                                   " iallocator '%s': %s" %
1986
                                   (self.op.iallocator, ial.info),
1987
                                   errors.ECODE_NORES)
1988

    
1989
      self.ia_result = ial.result
1990

    
1991
    if self.op.dry_run:
1992
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1993
        constants.JOB_IDS_KEY: [],
1994
        })
1995

    
1996
  def _ConstructPartialResult(self):
1997
    """Contructs the partial result.
1998

1999
    """
2000
    if self.op.iallocator:
2001
      (allocatable, failed_insts) = self.ia_result
2002
      allocatable_insts = map(compat.fst, allocatable)
2003
    else:
2004
      allocatable_insts = [op.instance_name for op in self.op.instances]
2005
      failed_insts = []
2006

    
2007
    return {
2008
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
2009
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
2010
      }
2011
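  # Example of the mapping built above (instance names are hypothetical):
  #   {opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: ["inst1.example.com"],
  #    opcodes.OpInstanceMultiAlloc.FAILED_KEY: ["inst2.example.com"]}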

    
2012
  def Exec(self, feedback_fn):
2013
    """Executes the opcode.
2014

2015
    """
2016
    jobs = []
2017
    if self.op.iallocator:
2018
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2019
      (allocatable, failed) = self.ia_result
2020

    
2021
      for (name, nodes) in allocatable:
2022
        op = op2inst.pop(name)
2023

    
2024
        if len(nodes) > 1:
2025
          (op.pnode, op.snode) = nodes
2026
        else:
2027
          (op.pnode,) = nodes
2028

    
2029
        jobs.append([op])
2030

    
2031
      missing = set(op2inst.keys()) - set(failed)
2032
      assert not missing, \
2033
        "Iallocator did return incomplete result: %s" % \
2034
        utils.CommaJoin(missing)
2035
    else:
2036
      jobs.extend([op] for op in self.op.instances)
2037

    
2038
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2039

    
2040

    
2041
class _InstNicModPrivate:
2042
  """Data structure for network interface modifications.
2043

2044
  Used by L{LUInstanceSetParams}.
2045

2046
  """
2047
  def __init__(self):
2048
    self.params = None
2049
    self.filled = None
2050

    
2051

    
2052
def _PrepareContainerMods(mods, private_fn):
2053
  """Prepares a list of container modifications by adding a private data field.
2054

2055
  @type mods: list of tuples; (operation, index, parameters)
2056
  @param mods: List of modifications
2057
  @type private_fn: callable or None
2058
  @param private_fn: Callable for constructing a private data field for a
2059
    modification
2060
  @rtype: list
2061

2062
  """
2063
  if private_fn is None:
2064
    fn = lambda: None
2065
  else:
2066
    fn = private_fn
2067

    
2068
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2069
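# Illustrative sketch (hypothetical values, not part of the original module):
# with private_fn=None, a modifications list such as
#   [(constants.DDM_ADD, -1, {"size": 1024})]
# is returned by the function above as
#   [(constants.DDM_ADD, -1, {"size": 1024}, None)]
# i.e. every (op, index, params) tuple simply gains a private data slot.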

    
2070

    
2071
def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
2072
  """Checks if nodes have enough physical CPUs
2073

2074
  This function checks if all given nodes have the needed number of
2075
  physical CPUs. In case any node has fewer CPUs or we cannot get the
2076
  information from the node, this function raises an OpPrereqError
2077
  exception.
2078

2079
  @type lu: C{LogicalUnit}
2080
  @param lu: a logical unit from which we get configuration data
2081
  @type nodenames: C{list}
2082
  @param nodenames: the list of node names to check
2083
  @type requested: C{int}
2084
  @param requested: the minimum acceptable number of physical CPUs
2085
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2086
      or we cannot check the node
2087

2088
  """
2089
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2090
  for node in nodenames:
2091
    info = nodeinfo[node]
2092
    info.Raise("Cannot get current information from node %s" % node,
2093
               prereq=True, ecode=errors.ECODE_ENVIRON)
2094
    (_, _, (hv_info, )) = info.payload
2095
    num_cpus = hv_info.get("cpu_total", None)
2096
    if not isinstance(num_cpus, int):
2097
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2098
                                 " on node %s, result was '%s'" %
2099
                                 (node, num_cpus), errors.ECODE_ENVIRON)
2100
    if requested > num_cpus:
2101
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2102
                                 "required" % (node, num_cpus, requested),
2103
                                 errors.ECODE_NORES)
2104
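# Illustrative usage sketch (hypothetical caller and values): a logical unit
# needing, say, 4 physical CPUs on the primary node could call
#   _CheckNodesPhysicalCPUs(self, [pnode.name], 4, self.op.hypervisor)
# from its CheckPrereq method; a shortfall or an unreachable node raises
# OpPrereqError there.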

    
2105

    
2106
def GetItemFromContainer(identifier, kind, container):
2107
  """Return the item refered by the identifier.
2108

2109
  @type identifier: string
2110
  @param identifier: Item index or name or UUID
2111
  @type kind: string
2112
  @param kind: One-word item description
2113
  @type container: list
2114
  @param container: Container to get the item from
2115

2116
  """
2117
  # Index
2118
  try:
2119
    idx = int(identifier)
2120
    if idx == -1:
2121
      # Append
2122
      absidx = len(container) - 1
2123
    elif idx < 0:
2124
      raise IndexError("Not accepting negative indices other than -1")
2125
    elif idx > len(container):
2126
      raise IndexError("Got %s index %s, but there are only %s" %
2127
                       (kind, idx, len(container)))
2128
    else:
2129
      absidx = idx
2130
    return (absidx, container[idx])
2131
  except ValueError:
2132
    pass
2133

    
2134
  for idx, item in enumerate(container):
2135
    if item.uuid == identifier or item.name == identifier:
2136
      return (idx, item)
2137

    
2138
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2139
                             (kind, identifier), errors.ECODE_NOENT)
2140
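
# The helper below is an illustrative sketch only (not used by Ganeti itself);
# the _Item class and all values are invented for demonstration.
def _ExampleGetItemFromContainer():
  """Shows the three ways GetItemFromContainer resolves an identifier.

  Real callers pass lists of L{objects.Disk} or L{objects.NIC}; here a tiny
  stand-in class with just C{name} and C{uuid} attributes is enough.

  """
  class _Item:
    def __init__(self, name, uuid):
      self.name = name
      self.uuid = uuid

  nics = [_Item("eth0", "11111111"), _Item("eth1", "22222222")]
  assert GetItemFromContainer("1", "nic", nics)[0] == 1         # numeric index
  assert GetItemFromContainer("-1", "nic", nics)[0] == 1        # last item
  assert GetItemFromContainer("eth0", "nic", nics)[0] == 0      # by name
  assert GetItemFromContainer("22222222", "nic", nics)[0] == 1  # by UUID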

    
2141

    
2142
def _ApplyContainerMods(kind, container, chgdesc, mods,
2143
                        create_fn, modify_fn, remove_fn):
2144
  """Applies descriptions in C{mods} to C{container}.
2145

2146
  @type kind: string
2147
  @param kind: One-word item description
2148
  @type container: list
2149
  @param container: Container to modify
2150
  @type chgdesc: None or list
2151
  @param chgdesc: List of applied changes
2152
  @type mods: list
2153
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2154
  @type create_fn: callable
2155
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2156
    receives absolute item index, parameters and private data object as added
2157
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2158
    as list
2159
  @type modify_fn: callable
2160
  @param modify_fn: Callback for modifying an existing item
2161
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2162
    and private data object as added by L{_PrepareContainerMods}, returns
2163
    changes as list
2164
  @type remove_fn: callable
2165
  @param remove_fn: Callback on removing item; receives absolute item index,
2166
    item and private data object as added by L{_PrepareContainerMods}
2167

2168
  """
2169
  for (op, identifier, params, private) in mods:
2170
    changes = None
2171

    
2172
    if op == constants.DDM_ADD:
2173
      # Calculate where item will be added
2174
      # When adding an item, identifier can only be an index
2175
      try:
2176
        idx = int(identifier)
2177
      except ValueError:
2178
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2179
                                   " identifier for %s" % constants.DDM_ADD,
2180
                                   errors.ECODE_INVAL)
2181
      if idx == -1:
2182
        addidx = len(container)
2183
      else:
2184
        if idx < 0:
2185
          raise IndexError("Not accepting negative indices other than -1")
2186
        elif idx > len(container):
2187
          raise IndexError("Got %s index %s, but there are only %s" %
2188
                           (kind, idx, len(container)))
2189
        addidx = idx
2190

    
2191
      if create_fn is None:
2192
        item = params
2193
      else:
2194
        (item, changes) = create_fn(addidx, params, private)
2195

    
2196
      if idx == -1:
2197
        container.append(item)
2198
      else:
2199
        assert idx >= 0
2200
        assert idx <= len(container)
2201
        # list.insert does so before the specified index
2202
        container.insert(idx, item)
2203
    else:
2204
      # Retrieve existing item
2205
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2206

    
2207
      if op == constants.DDM_REMOVE:
2208
        assert not params
2209

    
2210
        changes = [("%s/%s" % (kind, absidx), "remove")]
2211

    
2212
        if remove_fn is not None:
2213
          msg = remove_fn(absidx, item, private)
2214
          if msg:
2215
            changes.append(("%s/%s" % (kind, absidx), msg))
2216

    
2217
        assert container[absidx] == item
2218
        del container[absidx]
2219
      elif op == constants.DDM_MODIFY:
2220
        if modify_fn is not None:
2221
          changes = modify_fn(absidx, item, params, private)
2222
      else:
2223
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2224

    
2225
    assert _TApplyContModsCbChanges(changes)
2226

    
2227
    if not (chgdesc is None or changes is None):
2228
      chgdesc.extend(changes)
2229
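
# The helper below is an illustrative sketch only (not used by Ganeti itself);
# it exercises _PrepareContainerMods and _ApplyContainerMods on a plain list
# of strings instead of real disk/NIC objects.
def _ExampleApplyContainerMods():
  """Demonstrates how prepared modifications are applied to a container.

  """
  container = ["a", "b"]
  chgdesc = []
  mods = _PrepareContainerMods([(constants.DDM_ADD, -1, "c"),
                                (constants.DDM_REMOVE, "0", None)], None)
  # With create_fn=None the parameters are taken as the new item as-is;
  # modify_fn/remove_fn are not needed for these two operations.
  _ApplyContainerMods("item", container, chgdesc, mods, None, None, None)
  # container is now ["b", "c"]; chgdesc records the removal of index 0
  return (container, chgdesc)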

    
2230

    
2231
def _UpdateIvNames(base_index, disks):
2232
  """Updates the C{iv_name} attribute of disks.
2233

2234
  @type disks: list of L{objects.Disk}
2235

2236
  """
2237
  for (idx, disk) in enumerate(disks):
2238
    disk.iv_name = "disk/%s" % (base_index + idx, )
2239

    
2240

    
2241
class LUInstanceSetParams(LogicalUnit):
2242
  """Modifies an instances's parameters.
2243

2244
  """
2245
  HPATH = "instance-modify"
2246
  HTYPE = constants.HTYPE_INSTANCE
2247
  REQ_BGL = False
2248

    
2249
  @staticmethod
2250
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2251
    assert ht.TList(mods)
2252
    assert not mods or len(mods[0]) in (2, 3)
2253

    
2254
    if mods and len(mods[0]) == 2:
2255
      result = []
2256

    
2257
      addremove = 0
2258
      for op, params in mods:
2259
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2260
          result.append((op, -1, params))
2261
          addremove += 1
2262

    
2263
          if addremove > 1:
2264
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2265
                                       " supported at a time" % kind,
2266
                                       errors.ECODE_INVAL)
2267
        else:
2268
          result.append((constants.DDM_MODIFY, op, params))
2269

    
2270
      assert verify_fn(result)
2271
    else:
2272
      result = mods
2273

    
2274
    return result
2275
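  # Illustrative example (hypothetical values): the legacy two-element format
  #   [(constants.DDM_ADD, {"size": 1024}), ("0", {"mode": "ro"})]
  # is upgraded by the staticmethod above to the three-element format
  #   [(constants.DDM_ADD, -1, {"size": 1024}),
  #    (constants.DDM_MODIFY, "0", {"mode": "ro"})]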

    
2276
  @staticmethod
2277
  def _CheckMods(kind, mods, key_types, item_fn):
2278
    """Ensures requested disk/NIC modifications are valid.
2279

2280
    """
2281
    for (op, _, params) in mods:
2282
      assert ht.TDict(params)
2283

    
2284
      # If 'key_types' is an empty dict, we assume we have an
2285
      # 'ext' template and thus do not ForceDictType
2286
      if key_types:
2287
        utils.ForceDictType(params, key_types)
2288

    
2289
      if op == constants.DDM_REMOVE:
2290
        if params:
2291
          raise errors.OpPrereqError("No settings should be passed when"
2292
                                     " removing a %s" % kind,
2293
                                     errors.ECODE_INVAL)
2294
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2295
        item_fn(op, params)
2296
      else:
2297
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2298

    
2299
  def _VerifyDiskModification(self, op, params):
2300
    """Verifies a disk modification.
2301

2302
    """
2303
    if op == constants.DDM_ADD:
2304
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2305
      if mode not in constants.DISK_ACCESS_SET:
2306
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2307
                                   errors.ECODE_INVAL)
2308

    
2309
      size = params.get(constants.IDISK_SIZE, None)
2310
      if size is None:
2311
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2312
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2313

    
2314
      try:
2315
        size = int(size)
2316
      except (TypeError, ValueError), err:
2317
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2318
                                   errors.ECODE_INVAL)
2319

    
2320
      params[constants.IDISK_SIZE] = size
2321
      name = params.get(constants.IDISK_NAME, None)
2322
      if name is not None and name.lower() == constants.VALUE_NONE:
2323
        params[constants.IDISK_NAME] = None
2324

    
2325
    elif op == constants.DDM_MODIFY:
2326
      if constants.IDISK_SIZE in params:
2327
        raise errors.OpPrereqError("Disk size change not possible, use"
2328
                                   " grow-disk", errors.ECODE_INVAL)
2329

    
2330
      # Disk modification supports changing only the disk name and mode.
2331
      # Changing arbitrary parameters is allowed only for the ext disk template.
2332
      if self.instance.disk_template != constants.DT_EXT:
2333
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2334

    
2335
      name = params.get(constants.IDISK_NAME, None)
2336
      if name is not None and name.lower() == constants.VALUE_NONE:
2337
        params[constants.IDISK_NAME] = None
2338

    
2339
  @staticmethod
2340
  def _VerifyNicModification(op, params):
2341
    """Verifies a network interface modification.
2342

2343
    """
2344
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2345
      ip = params.get(constants.INIC_IP, None)
2346
      name = params.get(constants.INIC_NAME, None)
2347
      req_net = params.get(constants.INIC_NETWORK, None)
2348
      link = params.get(constants.NIC_LINK, None)
2349
      mode = params.get(constants.NIC_MODE, None)
2350
      if name is not None and name.lower() == constants.VALUE_NONE:
2351
        params[constants.INIC_NAME] = None
2352
      if req_net is not None:
2353
        if req_net.lower() == constants.VALUE_NONE:
2354
          params[constants.INIC_NETWORK] = None
2355
          req_net = None
2356
        elif link is not None or mode is not None:
2357
          raise errors.OpPrereqError("If network is given"
2358
                                     " mode or link should not",
2359
                                     errors.ECODE_INVAL)
2360

    
2361
      if op == constants.DDM_ADD:
2362
        macaddr = params.get(constants.INIC_MAC, None)
2363
        if macaddr is None:
2364
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2365

    
2366
      if ip is not None:
2367
        if ip.lower() == constants.VALUE_NONE:
2368
          params[constants.INIC_IP] = None
2369
        else:
2370
          if ip.lower() == constants.NIC_IP_POOL:
2371
            if op == constants.DDM_ADD and req_net is None:
2372
              raise errors.OpPrereqError("If ip=pool, parameter network"
2373
                                         " cannot be none",
2374
                                         errors.ECODE_INVAL)
2375
          else:
2376
            if not netutils.IPAddress.IsValid(ip):
2377
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2378
                                         errors.ECODE_INVAL)
2379

    
2380
      if constants.INIC_MAC in params:
2381
        macaddr = params[constants.INIC_MAC]
2382
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2383
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2384

    
2385
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2386
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2387
                                     " modifying an existing NIC",
2388
                                     errors.ECODE_INVAL)
2389
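  # Illustrative example (key spellings and values are assumptions): for a
  # DDM_ADD modification, the staticmethod above normalizes
  #   {"ip": "none", "network": "none"}
  # in place to
  #   {"ip": None, "network": None, "mac": constants.VALUE_AUTO}
  # i.e. the literal string "none" becomes None and a missing MAC address
  # defaults to automatic generation.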

    
2390
  def CheckArguments(self):
2391
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2392
            self.op.hvparams or self.op.beparams or self.op.os_name or
2393
            self.op.osparams or self.op.offline is not None or
2394
            self.op.runtime_mem or self.op.pnode):
2395
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2396

    
2397
    if self.op.hvparams:
2398
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2399
                           "hypervisor", "instance", "cluster")
2400

    
2401
    self.op.disks = self._UpgradeDiskNicMods(
2402
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2403
    self.op.nics = self._UpgradeDiskNicMods(
2404
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2405

    
2406
    if self.op.disks and self.op.disk_template is not None:
2407
      raise errors.OpPrereqError("Disk template conversion and other disk"
2408
                                 " changes not supported at the same time",
2409
                                 errors.ECODE_INVAL)
2410

    
2411
    if (self.op.disk_template and
2412
        self.op.disk_template in constants.DTS_INT_MIRROR and
2413
        self.op.remote_node is None):
2414
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2415
                                 " one requires specifying a secondary node",
2416
                                 errors.ECODE_INVAL)
2417

    
2418
    # Check NIC modifications
2419
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2420
                    self._VerifyNicModification)
2421

    
2422
    if self.op.pnode:
2423
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2424

    
2425
  def ExpandNames(self):
2426
    self._ExpandAndLockInstance()
2427
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2428
    # Can't even acquire node locks in shared mode as upcoming changes in
2429
    # Ganeti 2.6 will start to modify the node object on disk conversion
2430
    self.needed_locks[locking.LEVEL_NODE] = []
2431
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2432
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2433
    # Lock the node group (shared) to look up the ipolicy
2434
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2435

    
2436
  def DeclareLocks(self, level):
2437
    if level == locking.LEVEL_NODEGROUP:
2438
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2439
      # Acquire locks for the instance's nodegroups optimistically. Needs
2440
      # to be verified in CheckPrereq
2441
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2442
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2443
    elif level == locking.LEVEL_NODE:
2444
      self._LockInstancesNodes()
2445
      if self.op.disk_template and self.op.remote_node:
2446
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2447
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2448
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2449
      # Copy node locks
2450
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2451
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2452

    
2453
  def BuildHooksEnv(self):
2454
    """Build hooks env.
2455

2456
    This runs on the master, primary and secondaries.
2457

2458
    """
2459
    args = {}
2460
    if constants.BE_MINMEM in self.be_new:
2461
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2462
    if constants.BE_MAXMEM in self.be_new:
2463
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2464
    if constants.BE_VCPUS in self.be_new:
2465
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2466
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2467
    # information at all.
2468

    
2469
    if self._new_nics is not None:
2470
      nics = []
2471

    
2472
      for nic in self._new_nics:
2473
        n = copy.deepcopy(nic)
2474
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2475
        n.nicparams = nicparams
2476
        nics.append(NICToTuple(self, n))
2477

    
2478
      args["nics"] = nics
2479

    
2480
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2481
    if self.op.disk_template:
2482
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2483
    if self.op.runtime_mem:
2484
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2485

    
2486
    return env
2487

    
2488
  def BuildHooksNodes(self):
2489
    """Build hooks nodes.
2490

2491
    """
2492
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2493
    return (nl, nl)
2494

    
2495
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2496
                              old_params, cluster, pnode):
2497

    
2498
    update_params_dict = dict([(key, params[key])
2499
                               for key in constants.NICS_PARAMETERS
2500
                               if key in params])
2501

    
2502
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2503
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2504

    
2505
    new_net_uuid = None
2506
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2507
    if new_net_uuid_or_name:
2508
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2509
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2510

    
2511
    if old_net_uuid:
2512
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2513

    
2514
    if new_net_uuid:
2515
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2516
      if not netparams:
2517
        raise errors.OpPrereqError("No netparams found for the network"
2518
                                   " %s, probably not connected" %
2519
                                   new_net_obj.name, errors.ECODE_INVAL)
2520
      new_params = dict(netparams)
2521
    else:
2522
      new_params = GetUpdatedParams(old_params, update_params_dict)
2523

    
2524
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2525

    
2526
    new_filled_params = cluster.SimpleFillNIC(new_params)
2527
    objects.NIC.CheckParameterSyntax(new_filled_params)
2528

    
2529
    new_mode = new_filled_params[constants.NIC_MODE]
2530
    if new_mode == constants.NIC_MODE_BRIDGED:
2531
      bridge = new_filled_params[constants.NIC_LINK]
2532
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2533
      if msg:
2534
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2535
        if self.op.force:
2536
          self.warn.append(msg)
2537
        else:
2538
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2539

    
2540
    elif new_mode == constants.NIC_MODE_ROUTED:
2541
      ip = params.get(constants.INIC_IP, old_ip)
2542
      if ip is None:
2543
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2544
                                   " on a routed NIC", errors.ECODE_INVAL)
2545

    
2546
    elif new_mode == constants.NIC_MODE_OVS:
2547
      # TODO: check OVS link
2548
      self.LogInfo("OVS links are currently not checked for correctness")
2549

    
2550
    if constants.INIC_MAC in params:
2551
      mac = params[constants.INIC_MAC]
2552
      if mac is None:
2553
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2554
                                   errors.ECODE_INVAL)
2555
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2556
        # otherwise generate the MAC address
2557
        params[constants.INIC_MAC] = \
2558
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2559
      else:
2560
        # or validate/reserve the current one
2561
        try:
2562
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2563
        except errors.ReservationError:
2564
          raise errors.OpPrereqError("MAC address '%s' already in use"
2565
                                     " in cluster" % mac,
2566
                                     errors.ECODE_NOTUNIQUE)
2567
    elif new_net_uuid != old_net_uuid:
2568

    
2569
      def get_net_prefix(net_uuid):
2570
        mac_prefix = None
2571
        if net_uuid:
2572
          nobj = self.cfg.GetNetwork(net_uuid)
2573
          mac_prefix = nobj.mac_prefix
2574

    
2575
        return mac_prefix
2576

    
2577
      new_prefix = get_net_prefix(new_net_uuid)
2578
      old_prefix = get_net_prefix(old_net_uuid)
2579
      if old_prefix != new_prefix:
2580
        params[constants.INIC_MAC] = \
2581
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2582

    
2583
    # if there is a change in (ip, network) tuple
2584
    new_ip = params.get(constants.INIC_IP, old_ip)
2585
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2586
      if new_ip:
2587
        # if IP is pool then require a network and generate one IP
2588
        if new_ip.lower() == constants.NIC_IP_POOL:
2589
          if new_net_uuid:
2590
            try:
2591
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2592
            except errors.ReservationError:
2593
              raise errors.OpPrereqError("Unable to get a free IP"
2594
                                         " from the address pool",
2595
                                         errors.ECODE_STATE)
2596
            self.LogInfo("Chose IP %s from network %s",
2597
                         new_ip,
2598
                         new_net_obj.name)
2599
            params[constants.INIC_IP] = new_ip
2600
          else:
2601
            raise errors.OpPrereqError("ip=pool, but no network found",
2602
                                       errors.ECODE_INVAL)
2603
        # Reserve the new IP in the new network, if any
2604
        elif new_net_uuid:
2605
          try:
2606
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2607
                               check=self.op.conflicts_check)
2608
            self.LogInfo("Reserving IP %s in network %s",
2609
                         new_ip, new_net_obj.name)
2610
          except errors.ReservationError:
2611
            raise errors.OpPrereqError("IP %s not available in network %s" %
2612
                                       (new_ip, new_net_obj.name),
2613
                                       errors.ECODE_NOTUNIQUE)
2614
        # new network is None so check if new IP is a conflicting IP
2615
        elif self.op.conflicts_check:
2616
          _CheckForConflictingIp(self, new_ip, pnode)
2617

    
2618
      # release old IP if old network is not None
2619
      if old_ip and old_net_uuid:
2620
        try:
2621
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2622
        except errors.AddressPoolError:
2623
          logging.warning("Release IP %s not contained in network %s",
2624
                          old_ip, old_net_obj.name)
2625

    
2626
    # there are no changes in (ip, network) tuple and old network is not None
2627
    elif (old_net_uuid is not None and
2628
          (req_link is not None or req_mode is not None)):
2629
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2630
                                 " a NIC that is connected to a network",
2631
                                 errors.ECODE_INVAL)
2632

    
2633
    private.params = new_params
2634
    private.filled = new_filled_params
2635

    
2636
  def _PreCheckDiskTemplate(self, pnode_info):
2637
    """CheckPrereq checks related to a new disk template."""
2638
    # Arguments are passed to avoid configuration lookups
2639
    instance = self.instance
2640
    pnode = instance.primary_node
2641
    cluster = self.cluster
2642
    if instance.disk_template == self.op.disk_template:
2643
      raise errors.OpPrereqError("Instance already has disk template %s" %
2644
                                 instance.disk_template, errors.ECODE_INVAL)
2645

    
2646
    if (instance.disk_template,
2647
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2648
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2649
                                 " %s to %s" % (instance.disk_template,
2650
                                                self.op.disk_template),
2651
                                 errors.ECODE_INVAL)
2652
    CheckInstanceState(self, instance, INSTANCE_DOWN,
2653
                       msg="cannot change disk template")
2654
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2655
      if self.op.remote_node == pnode:
2656
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2657
                                   " as the primary node of the instance" %
2658
                                   self.op.remote_node, errors.ECODE_STATE)
2659
      CheckNodeOnline(self, self.op.remote_node)
2660
      CheckNodeNotDrained(self, self.op.remote_node)
2661
      # FIXME: here we assume that the old instance type is DT_PLAIN
2662
      assert instance.disk_template == constants.DT_PLAIN
2663
      disks = [{constants.IDISK_SIZE: d.size,
2664
                constants.IDISK_VG: d.logical_id[0]}
2665
               for d in instance.disks]
2666
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2667
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2668

    
2669
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2670
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2671
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2672
                                                              snode_group)
2673
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2674
                             ignore=self.op.ignore_ipolicy)
2675
      if pnode_info.group != snode_info.group:
2676
        self.LogWarning("The primary and secondary nodes are in two"
2677
                        " different node groups; the disk parameters"
2678
                        " from the first disk's node group will be"
2679
                        " used")
2680

    
2681
    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2682
      # Make sure none of the nodes require exclusive storage
2683
      nodes = [pnode_info]
2684
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2685
        assert snode_info
2686
        nodes.append(snode_info)
2687
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2688
      if compat.any(map(has_es, nodes)):
2689
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2690
                  " storage is enabled" % (instance.disk_template,
2691
                                           self.op.disk_template))
2692
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2693

    
2694
  # too many local variables
2695
  # pylint: disable=R0914
2696
  def CheckPrereq(self):
2697
    """Check prerequisites.
2698

2699
    This only checks the instance list against the existing names.
2700

2701
    """
2702
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2703
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2704

    
2705
    cluster = self.cluster = self.cfg.GetClusterInfo()
2706
    assert self.instance is not None, \
2707
      "Cannot retrieve locked instance %s" % self.op.instance_name
2708

    
2709
    pnode = instance.primary_node
2710

    
2711
    self.warn = []
2712

    
2713
    if (self.op.pnode is not None and self.op.pnode != pnode and
2714
        not self.op.force):
2715
      # verify that the instance is not up
2716
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
2717
                                                  instance.hypervisor)
2718
      if instance_info.fail_msg:
2719
        self.warn.append("Can't get instance runtime information: %s" %
2720
                         instance_info.fail_msg)
2721
      elif instance_info.payload:
2722
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2723
                                   errors.ECODE_STATE)
2724

    
2725
    assert pnode in self.owned_locks(locking.LEVEL_NODE)
2726
    nodelist = list(instance.all_nodes)
2727
    pnode_info = self.cfg.GetNodeInfo(pnode)
2728
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2729

    
2730
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2731
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2732
    group_info = self.cfg.GetNodeGroup(pnode_info.group)
2733

    
2734
    # dictionary with instance information after the modification
2735
    ispec = {}
2736

    
2737
    # Check disk modifications. This is done here and not in CheckArguments
2738
    # (as with NICs), because we need to know the instance's disk template
2739
    if instance.disk_template == constants.DT_EXT:
2740
      self._CheckMods("disk", self.op.disks, {},
2741
                      self._VerifyDiskModification)
2742
    else:
2743
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2744
                      self._VerifyDiskModification)
2745

    
2746
    # Prepare disk/NIC modifications
2747
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
2748
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2749

    
2750
    # Check the validity of the `provider' parameter
    if instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and the"
                                       " parameter '%s' is missing when"
                                       " adding a disk" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

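    # When hotplugging is requested, verify that it is actually supported
    # on the primary node; with hotplug_if_possible we fall back to a
    # regular (non-hotplug) modification instead of failing.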
    if self.op.hotplug or self.op.hotplug_if_possible:
2778
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
2779
                                               self.instance)
2780
      if result.fail_msg:
2781
        if self.op.hotplug:
2782
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
2783
                       prereq=True)
2784
        else:
2785
          self.LogWarning(result.fail_msg)
2786
          self.op.hotplug = False
2787
          self.LogInfo("Modification will take place without hotplugging.")
2788
      else:
2789
        self.op.hotplug = True
2790

    
2791
    # OS change
2792
    if self.op.os_name and not self.op.force:
2793
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2794
                     self.op.force_variant)
2795
      instance_os = self.op.os_name
2796
    else:
2797
      instance_os = instance.os
2798

    
2799
    assert not (self.op.disk_template and self.op.disks), \
2800
      "Can't modify disk template and apply disk changes at the same time"
2801

    
2802
    if self.op.disk_template:
2803
      self._PreCheckDiskTemplate(pnode_info)
2804

    
2805
    # hvparams processing
2806
    if self.op.hvparams:
2807
      hv_type = instance.hypervisor
2808
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2809
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2810
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2811

    
2812
      # local check
2813
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2814
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2815
      self.hv_proposed = self.hv_new = hv_new # the new actual values
2816
      self.hv_inst = i_hvdict # the new dict (without defaults)
2817
    else:
2818
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2819
                                              instance.hvparams)
2820
      self.hv_new = self.hv_inst = {}
2821

    
2822
    # beparams processing
2823
    if self.op.beparams:
2824
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2825
                                  use_none=True)
2826
      objects.UpgradeBeParams(i_bedict)
2827
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2828
      be_new = cluster.SimpleFillBE(i_bedict)
2829
      self.be_proposed = self.be_new = be_new # the new actual values
2830
      self.be_inst = i_bedict # the new dict (without defaults)
2831
    else:
2832
      self.be_new = self.be_inst = {}
2833
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2834
    be_old = cluster.FillBE(instance)
2835

    
2836
    # CPU param validation -- checking every time a parameter is
2837
    # changed to cover all cases where either CPU mask or vcpus have
2838
    # changed
2839
    if (constants.BE_VCPUS in self.be_proposed and
2840
        constants.HV_CPU_MASK in self.hv_proposed):
2841
      cpu_list = \
2842
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2843
      # Verify mask is consistent with number of vCPUs. Can skip this
2844
      # test if only 1 entry in the CPU mask, which means same mask
2845
      # is applied to all vCPUs.
2846
      if (len(cpu_list) > 1 and
2847
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2848
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2849
                                   " CPU mask [%s]" %
2850
                                   (self.be_proposed[constants.BE_VCPUS],
2851
                                    self.hv_proposed[constants.HV_CPU_MASK]),
2852
                                   errors.ECODE_INVAL)
2853

    
2854
      # Only perform this test if a new CPU mask is given
2855
      if constants.HV_CPU_MASK in self.hv_new:
2856
        # Calculate the largest CPU number requested
2857
        max_requested_cpu = max(map(max, cpu_list))
2858
        # Check that all of the instance's nodes have enough physical CPUs to
2859
        # satisfy the requested CPU mask
2860
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2861
                                max_requested_cpu + 1, instance.hypervisor)
2862

    
2863
    # osparams processing
2864
    if self.op.osparams:
2865
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2866
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2867
      self.os_inst = i_osdict # the new dict (without defaults)
2868
    else:
2869
      self.os_inst = {}
2870

    
2871
    #TODO(dynmem): do the appropriate check involving MINMEM
2872
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2873
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2874
      mem_check_list = [pnode]
2875
      if be_new[constants.BE_AUTO_BALANCE]:
2876
        # either we changed auto_balance to yes or it was from before
2877
        mem_check_list.extend(instance.secondary_nodes)
2878
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
2879
                                                  instance.hypervisor)
2880
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2881
                                         [instance.hypervisor], False)
2882
      pninfo = nodeinfo[pnode]
2883
      msg = pninfo.fail_msg
2884
      if msg:
2885
        # Assume the primary node is unreachable and go ahead
2886
        self.warn.append("Can't get info from primary node %s: %s" %
2887
                         (pnode, msg))
2888
      else:
2889
        (_, _, (pnhvinfo, )) = pninfo.payload
2890
        if not isinstance(pnhvinfo.get("memory_free", None), int):
2891
          self.warn.append("Node data from primary node %s doesn't contain"
2892
                           " free memory information" % pnode)
2893
        elif instance_info.fail_msg:
2894
          self.warn.append("Can't get instance runtime information: %s" %
2895
                           instance_info.fail_msg)
2896
        else:
2897
          if instance_info.payload:
2898
            current_mem = int(instance_info.payload["memory"])
2899
          else:
2900
            # Assume instance not running
2901
            # (there is a slight race condition here, but it's not very
2902
            # probable, and we have no other way to check)
2903
            # TODO: Describe race condition
2904
            current_mem = 0
2905
          #TODO(dynmem): do the appropriate check involving MINMEM
2906
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2907
                      pnhvinfo["memory_free"])
2908
          if miss_mem > 0:
2909
            raise errors.OpPrereqError("This change will prevent the instance"
2910
                                       " from starting, due to %d MB of memory"
2911
                                       " missing on its primary node" %
2912
                                       miss_mem, errors.ECODE_NORES)
2913

    
2914
      if be_new[constants.BE_AUTO_BALANCE]:
2915
        for node, nres in nodeinfo.items():
2916
          if node not in instance.secondary_nodes:
2917
            continue
2918
          nres.Raise("Can't get info from secondary node %s" % node,
2919
                     prereq=True, ecode=errors.ECODE_STATE)
2920
          (_, _, (nhvinfo, )) = nres.payload
2921
          if not isinstance(nhvinfo.get("memory_free", None), int):
2922
            raise errors.OpPrereqError("Secondary node %s didn't return free"
2923
                                       " memory information" % node,
2924
                                       errors.ECODE_STATE)
2925
          #TODO(dynmem): do the appropriate check involving MINMEM
2926
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2927
            raise errors.OpPrereqError("This change will prevent the instance"
2928
                                       " from failover to its secondary node"
2929
                                       " %s, due to not enough memory" % node,
2930
                                       errors.ECODE_STATE)
2931

    
2932
    if self.op.runtime_mem:
2933
      remote_info = self.rpc.call_instance_info(instance.primary_node,
2934
                                                instance.name,
2935
                                                instance.hypervisor)
2936
      remote_info.Raise("Error checking node %s" % instance.primary_node)
2937
      if not remote_info.payload: # not running already
2938
        raise errors.OpPrereqError("Instance %s is not running" %
2939
                                   instance.name, errors.ECODE_STATE)
2940

    
2941
      current_memory = remote_info.payload["memory"]
2942
      if (not self.op.force and
2943
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2944
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2945
        raise errors.OpPrereqError("Instance %s must have memory between %d"
2946
                                   " and %d MB of memory unless --force is"
2947
                                   " given" %
2948
                                   (instance.name,
2949
                                    self.be_proposed[constants.BE_MINMEM],
2950
                                    self.be_proposed[constants.BE_MAXMEM]),
2951
                                   errors.ECODE_INVAL)
2952

    
2953
      delta = self.op.runtime_mem - current_memory
2954
      if delta > 0:
2955
        CheckNodeFreeMemory(self, instance.primary_node,
2956
                            "ballooning memory for instance %s" %
2957
                            instance.name, delta, instance.hypervisor)
2958

    
2959
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2960
      raise errors.OpPrereqError("Disk operations not supported for"
2961
                                 " diskless instances", errors.ECODE_INVAL)
2962

    
2963
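    # Callbacks for _ApplyContainerMods, used below to verify the requested
    # NIC changes without applying them yet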
    def _PrepareNicCreate(_, params, private):
2964
      self._PrepareNicModification(params, private, None, None,
2965
                                   {}, cluster, pnode)
2966
      return (None, None)
2967

    
2968
    def _PrepareNicMod(_, nic, params, private):
2969
      self._PrepareNicModification(params, private, nic.ip, nic.network,
2970
                                   nic.nicparams, cluster, pnode)
2971
      return None
2972

    
2973
    def _PrepareNicRemove(_, params, __):
2974
      ip = params.ip
2975
      net = params.network
2976
      if net is not None and ip is not None:
2977
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2978

    
2979
    # Verify NIC changes (operating on copy)
2980
    nics = instance.nics[:]
2981
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
2982
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2983
    if len(nics) > constants.MAX_NICS:
2984
      raise errors.OpPrereqError("Instance has too many network interfaces"
2985
                                 " (%d), cannot add more" % constants.MAX_NICS,
2986
                                 errors.ECODE_STATE)
2987

    
2988
    def _PrepareDiskMod(_, disk, params, __):
2989
      disk.name = params.get(constants.IDISK_NAME, None)
2990

    
2991
    # Verify disk changes (operating on a copy)
2992
    disks = copy.deepcopy(instance.disks)
2993
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2994
                        _PrepareDiskMod, None)
2995
    utils.ValidateDeviceNames("disk", disks)
2996
    if len(disks) > constants.MAX_DISKS:
2997
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2998
                                 " more" % constants.MAX_DISKS,
2999
                                 errors.ECODE_STATE)
3000
    disk_sizes = [disk.size for disk in instance.disks]
3001
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
3002
                      self.diskmod if op == constants.DDM_ADD)
3003
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
3004
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
3005

    
3006
    if self.op.offline is not None and self.op.offline:
3007
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
3008
                         msg="can't change to offline")
3009

    
3010
    # Pre-compute NIC changes (necessary to use result in hooks)
3011
    self._nic_chgdesc = []
3012
    if self.nicmod:
3013
      # Operate on copies as this is still in prereq
3014
      nics = [nic.Copy() for nic in instance.nics]
3015
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3016
                          self._CreateNewNic, self._ApplyNicMods,
3017
                          self._RemoveNic)
3018
      # Verify that NIC names are unique and valid
3019
      utils.ValidateDeviceNames("NIC", nics)
3020
      self._new_nics = nics
3021
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3022
    else:
3023
      self._new_nics = None
3024
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
3025

    
3026
    if not self.op.ignore_ipolicy:
3027
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
3028
                                                              group_info)
3029

    
3030
      # Fill ispec with backend parameters
3031
      ispec[constants.ISPEC_SPINDLE_USE] = \
3032
        self.be_new.get(constants.BE_SPINDLE_USE, None)
3033
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3034
                                                         None)
3035

    
3036
      # Copy ispec to verify parameters with min/max values separately
3037
      if self.op.disk_template:
3038
        new_disk_template = self.op.disk_template
3039
      else:
3040
        new_disk_template = instance.disk_template
3041
      ispec_max = ispec.copy()
3042
      ispec_max[constants.ISPEC_MEM_SIZE] = \
3043
        self.be_new.get(constants.BE_MAXMEM, None)
3044
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3045
                                                     new_disk_template)
3046
      ispec_min = ispec.copy()
3047
      ispec_min[constants.ISPEC_MEM_SIZE] = \
3048
        self.be_new.get(constants.BE_MINMEM, None)
3049
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3050
                                                     new_disk_template)
3051

    
3052
      if (res_max or res_min):
3053
        # FIXME: Improve error message by including information about whether
3054
        # the upper or lower limit of the parameter fails the ipolicy.
3055
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3056
               (group_info, group_info.name,
3057
                utils.CommaJoin(set(res_max + res_min))))
3058
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3059

    
3060
  def _ConvertPlainToDrbd(self, feedback_fn):
3061
    """Converts an instance from plain to drbd.
3062

3063
    """
3064
    feedback_fn("Converting template to drbd")
3065
    instance = self.instance
3066
    pnode = instance.primary_node
3067
    snode = self.op.remote_node
3068

    
3069
    assert instance.disk_template == constants.DT_PLAIN
3070

    
3071
    # create a fake disk info for _GenerateDiskTemplate
3072
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3073
                  constants.IDISK_VG: d.logical_id[0],
3074
                  constants.IDISK_NAME: d.name}
3075
                 for d in instance.disks]
3076
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3077
                                     instance.name, pnode, [snode],
3078
                                     disk_info, None, None, 0, feedback_fn,
3079
                                     self.diskparams)
3080
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3081
                                        self.diskparams)
3082
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
3083
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
3084
    info = GetInstanceInfoText(instance)
3085
    feedback_fn("Creating additional volumes...")
3086
    # first, create the missing data and meta devices
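    # (on the primary node only the DRBD meta LV is new; the existing plain
    # LV is renamed below and reused as the DRBD data device)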
3087
    for disk in anno_disks:
3088
      # unfortunately this is... not too nice
3089
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
3090
                           info, True, p_excl_stor)
3091
      for child in disk.children:
3092
        CreateSingleBlockDev(self, snode, instance, child, info, True,
3093
                             s_excl_stor)
3094
    # at this stage, all new LVs have been created, we can rename the
3095
    # old ones
3096
    feedback_fn("Renaming original volumes...")
3097
    rename_list = [(o, n.children[0].logical_id)
3098
                   for (o, n) in zip(instance.disks, new_disks)]
3099
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
3100
    result.Raise("Failed to rename original LVs")
3101

    
3102
    feedback_fn("Initializing DRBD devices...")
3103
    # all child devices are in place, we can now create the DRBD devices
3104
    try:
3105
      for disk in anno_disks:
3106
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3107
          f_create = node == pnode
3108
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3109
                               excl_stor)
3110
    except errors.GenericError, e:
3111
      feedback_fn("Initializing of DRBD devices failed;"
3112
                  " renaming back original volumes...")
3113
      for disk in new_disks:
3114
        self.cfg.SetDiskID(disk, pnode)
3115
      rename_back_list = [(n.children[0], o.logical_id)
3116
                          for (n, o) in zip(new_disks, instance.disks)]
3117
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3118
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3119
      raise
3120

    
3121
    # at this point, the instance has been modified
3122
    instance.disk_template = constants.DT_DRBD8
3123
    instance.disks = new_disks
3124
    self.cfg.Update(instance, feedback_fn)
3125

    
3126
    # Release node locks while waiting for sync
3127
    ReleaseLocks(self, locking.LEVEL_NODE)
3128

    
3129
    # disks are created, waiting for sync
3130
    disk_abort = not WaitForSync(self, instance,
3131
                                 oneshot=not self.op.wait_for_sync)
3132
    if disk_abort:
3133
      raise errors.OpExecError("There are some degraded disks for"
3134
                               " this instance, please cleanup manually")
3135

    
3136
    # Node resource locks will be released by caller
3137

    
3138
  def _ConvertDrbdToPlain(self, feedback_fn):
3139
    """Converts an instance from drbd to plain.
3140

3141
    """
3142
    instance = self.instance
3143

    
3144
    assert len(instance.secondary_nodes) == 1
3145
    assert instance.disk_template == constants.DT_DRBD8
3146

    
3147
    pnode = instance.primary_node
3148
    snode = instance.secondary_nodes[0]
3149
    feedback_fn("Converting template to plain")
3150

    
3151
    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3152
    new_disks = [d.children[0] for d in instance.disks]
3153

    
3154
    # copy over size, mode and name
3155
    for parent, child in zip(old_disks, new_disks):
3156
      child.size = parent.size
3157
      child.mode = parent.mode
3158
      child.name = parent.name
3159

    
3160
    # this is a DRBD disk, return its port to the pool
3161
    # NOTE: this must be done right before the call to cfg.Update!
3162
    for disk in old_disks:
3163
      tcp_port = disk.logical_id[2]
3164
      self.cfg.AddTcpUdpPort(tcp_port)
3165

    
3166
    # update instance structure
3167
    instance.disks = new_disks
3168
    instance.disk_template = constants.DT_PLAIN
3169
    _UpdateIvNames(0, instance.disks)
3170
    self.cfg.Update(instance, feedback_fn)
3171

    
3172
    # Release locks in case removing disks takes a while
3173
    ReleaseLocks(self, locking.LEVEL_NODE)
3174

    
3175
    feedback_fn("Removing volumes on the secondary node...")
3176
    for disk in old_disks:
3177
      self.cfg.SetDiskID(disk, snode)
3178
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3179
      if msg:
3180
        self.LogWarning("Could not remove block device %s on node %s,"
3181
                        " continuing anyway: %s", disk.iv_name, snode, msg)
3182

    
3183
    feedback_fn("Removing unneeded volumes on the primary node...")
3184
    for idx, disk in enumerate(old_disks):
3185
      meta = disk.children[1]
3186
      self.cfg.SetDiskID(meta, pnode)
3187
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3188
      if msg:
3189
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
3190
                        " continuing anyway: %s", idx, pnode, msg)
3191

    
3192
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
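    """Hotplugs (or hot-unplugs) a single device on the primary node.

    Failures are only logged as warnings; a short status string is
    returned so the outcome can be recorded in the change list.

    """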
    self.LogInfo("Trying to hotplug device...")
3194
    msg = "hotplug:"
3195
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
3196
                                          self.instance, action, dev_type,
3197
                                          (device, self.instance),
3198
                                          extra, seq)
3199
    if result.fail_msg:
3200
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3201
      self.LogInfo("Continuing execution..")
3202
      msg += "failed"
3203
    else:
3204
      self.LogInfo("Hotplug done.")
3205
      msg += "done"
3206
    return msg
3207

    
3208
  def _CreateNewDisk(self, idx, params, _):
3209
    """Creates a new disk.
3210

3211
    """
3212
    instance = self.instance
3213

    
3214
    # add a new disk
3215
    if instance.disk_template in constants.DTS_FILEBASED:
3216
      (file_driver, file_path) = instance.disks[0].logical_id
3217
      file_path = os.path.dirname(file_path)
3218
    else:
3219
      file_driver = file_path = None
3220

    
3221
    disk = \
3222
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
3223
                           instance.primary_node, instance.secondary_nodes,
3224
                           [params], file_path, file_driver, idx,
3225
                           self.Log, self.diskparams)[0]
3226

    
3227
    new_disks = CreateDisks(self, instance, disks=[disk])
3228

    
3229
    if self.cluster.prealloc_wipe_disks:
3230
      # Wipe new disk
3231
      WipeOrCleanupDisks(self, instance,
3232
                         disks=[(idx, disk, 0)],
3233
                         cleanup=new_disks)
3234

    
3235
    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
3239
    if self.op.hotplug:
3240
      self.cfg.SetDiskID(disk, self.instance.primary_node)
3241
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3242
                                               (disk, self.instance),
3243
                                               self.instance.name, True, idx)
3244
      if result.fail_msg:
3245
        changes.append(("disk/%d" % idx, "assemble:failed"))
3246
        self.LogWarning("Can't assemble newly created disk %d: %s",
3247
                        idx, result.fail_msg)
3248
      else:
3249
        _, link_name = result.payload
3250
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3251
                                  constants.HOTPLUG_TARGET_DISK,
3252
                                  disk, link_name, idx)
3253
        changes.append(("disk/%d" % idx, msg))
3254

    
3255
    return (disk, changes)
3256

    
3257
  def _ModifyDisk(self, idx, disk, params, _):
3258
    """Modifies a disk.
3259

3260
    """
3261
    changes = []
3262
    if constants.IDISK_MODE in params:
3263
      disk.mode = params.get(constants.IDISK_MODE)
3264
      changes.append(("disk.mode/%d" % idx, disk.mode))
3265

    
3266
    if constants.IDISK_NAME in params:
3267
      disk.name = params.get(constants.IDISK_NAME)
3268
      changes.append(("disk.name/%d" % idx, disk.name))
3269

    
3270
    # Modify arbitrary params in case instance template is ext
3271
    for key, value in params.iteritems():
3272
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
3273
          self.instance.disk_template == constants.DT_EXT):
3274
        # stolen from GetUpdatedParams: default means reset/delete
3275
        if value.lower() == constants.VALUE_DEFAULT:
3276
          try:
3277
            del disk.params[key]
3278
          except KeyError:
3279
            pass
3280
        else:
3281
          disk.params[key] = value
3282
        changes.append(("disk.params:%s/%d" % (key, idx), value))
3283

    
3284
    return changes
3285

    
3286
  def _RemoveDisk(self, idx, root, _):
3287
    """Removes a disk.
3288

3289
    """
3290
    hotmsg = ""
3291
    if self.op.hotplug:
3292
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3293
                                   constants.HOTPLUG_TARGET_DISK,
3294
                                   root, None, idx)
3295
      ShutdownInstanceDisks(self, self.instance, [root])
3296

    
3297
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3298
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3299
      if self.op.keep_disks and disk.dev_type == constants.DT_EXT:
3300
        continue
3301
      self.cfg.SetDiskID(disk, node)
3302
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3303
      if msg:
3304
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3305
                        " continuing anyway", idx, node, msg)
3306

    
3307
    # if this is a DRBD disk, return its port to the pool
3308
    if root.dev_type in constants.LDS_DRBD:
3309
      self.cfg.AddTcpUdpPort(root.logical_id[2])
3310

    
3311
    return hotmsg
3312

    
3313
  def _CreateNewNic(self, idx, params, private):
3314
    """Creates data structure for a new network interface.
3315

3316
    """
3317
    mac = params[constants.INIC_MAC]
3318
    ip = params.get(constants.INIC_IP, None)
3319
    net = params.get(constants.INIC_NETWORK, None)
3320
    name = params.get(constants.INIC_NAME, None)
3321
    net_uuid = self.cfg.LookupNetwork(net)
3322
    #TODO: not private.filled?? can a nic have no nicparams??
3323
    nicparams = private.filled
3324
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3325
                       nicparams=nicparams)
3326
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3327

    
3328
    changes = [
3329
      ("nic.%d" % idx,
3330
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3331
       (mac, ip, private.filled[constants.NIC_MODE],
3332
       private.filled[constants.NIC_LINK], net)),
3333
      ]
3334

    
3335
    if self.op.hotplug:
3336
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3337
                                constants.HOTPLUG_TARGET_NIC,
3338
                                nobj, None, idx)
3339
      changes.append(("nic.%d" % idx, msg))
3340

    
3341
    return (nobj, changes)
3342

    
3343
  def _ApplyNicMods(self, idx, nic, params, private):
3344
    """Modifies a network interface.
3345

3346
    """
3347
    changes = []
3348

    
3349
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3350
      if key in params:
3351
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3352
        setattr(nic, key, params[key])
3353

    
3354
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3355
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3356
    if new_net_uuid != nic.network:
3357
      changes.append(("nic.network/%d" % idx, new_net))
3358
      nic.network = new_net_uuid
3359

    
3360
    if private.filled:
3361
      nic.nicparams = private.filled
3362

    
3363
      for (key, val) in nic.nicparams.items():
3364
        changes.append(("nic.%s/%d" % (key, idx), val))
3365

    
3366
    if self.op.hotplug:
3367
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3368
                                constants.HOTPLUG_TARGET_NIC,
3369
                                nic, None, idx)
3370
      changes.append(("nic/%d" % idx, msg))
3371

    
3372
    return changes
3373

    
3374
  def _RemoveNic(self, idx, nic, _):
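    """Hot-unplugs the NIC that is about to be removed, if requested.

    """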
    if self.op.hotplug:
3376
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3377
                                 constants.HOTPLUG_TARGET_NIC,
3378
                                 nic, None, idx)
3379

    
3380
  def Exec(self, feedback_fn):
3381
    """Modifies an instance.
3382

3383
    All parameters take effect only at the next restart of the instance.
3384

3385
    """
3386
    # Process here the warnings from CheckPrereq, as we don't have a
3387
    # feedback_fn there.
3388
    # TODO: Replace with self.LogWarning
3389
    for warn in self.warn:
3390
      feedback_fn("WARNING: %s" % warn)
3391

    
3392
    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Node resource locks should be owned if and only if the disk" \
      " template is being changed"
3395

    
3396
    result = []
3397
    instance = self.instance
3398

    
3399
    # New primary node
3400
    if self.op.pnode:
3401
      instance.primary_node = self.op.pnode
3402

    
3403
    # runtime memory
3404
    if self.op.runtime_mem:
3405
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3406
                                                     instance,
3407
                                                     self.op.runtime_mem)
3408
      rpcres.Raise("Cannot modify instance runtime memory")
3409
      result.append(("runtime_memory", self.op.runtime_mem))
3410

    
3411
    # Apply disk changes
3412
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3413
                        self._CreateNewDisk, self._ModifyDisk,
3414
                        self._RemoveDisk)
3415
    _UpdateIvNames(0, instance.disks)
3416

    
3417
    if self.op.disk_template:
3418
      if __debug__:
3419
        check_nodes = set(instance.all_nodes)
3420
        if self.op.remote_node:
3421
          check_nodes.add(self.op.remote_node)
3422
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3423
          owned = self.owned_locks(level)
3424
          assert not (check_nodes - owned), \
3425
            ("Not owning the correct locks, owning %r, expected at least %r" %
3426
             (owned, check_nodes))
3427

    
3428
      r_shut = ShutdownInstanceDisks(self, instance)
3429
      if not r_shut:
3430
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3431
                                 " proceed with disk template conversion")
3432
      mode = (instance.disk_template, self.op.disk_template)
3433
      try:
3434
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
3435
      except:
3436
        self.cfg.ReleaseDRBDMinors(instance.name)
3437
        raise
3438
      result.append(("disk_template", self.op.disk_template))
3439

    
3440
      assert instance.disk_template == self.op.disk_template, \
3441
        ("Expected disk template '%s', found '%s'" %
3442
         (self.op.disk_template, instance.disk_template))
3443

    
3444
    # Release node and resource locks if there are any (they might already have
3445
    # been released during disk conversion)
3446
    ReleaseLocks(self, locking.LEVEL_NODE)
3447
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
3448

    
3449
    # Apply NIC changes
3450
    if self._new_nics is not None:
3451
      instance.nics = self._new_nics
3452
      result.extend(self._nic_chgdesc)
3453

    
3454
    # hvparams changes
3455
    if self.op.hvparams:
3456
      instance.hvparams = self.hv_inst
3457
      for key, val in self.op.hvparams.iteritems():
3458
        result.append(("hv/%s" % key, val))
3459

    
3460
    # beparams changes
3461
    if self.op.beparams:
3462
      instance.beparams = self.be_inst
3463
      for key, val in self.op.beparams.iteritems():
3464
        result.append(("be/%s" % key, val))
3465

    
3466
    # OS change
3467
    if self.op.os_name:
3468
      instance.os = self.op.os_name
3469

    
3470
    # osparams changes
3471
    if self.op.osparams:
3472
      instance.osparams = self.os_inst
3473
      for key, val in self.op.osparams.iteritems():
3474
        result.append(("os/%s" % key, val))
3475

    
3476
    if self.op.offline is None:
3477
      # Ignore
3478
      pass
3479
    elif self.op.offline:
3480
      # Mark instance as offline
3481
      self.cfg.MarkInstanceOffline(instance.name)
3482
      result.append(("admin_state", constants.ADMINST_OFFLINE))
3483
    else:
3484
      # Mark instance as online, but stopped
3485
      self.cfg.MarkInstanceDown(instance.name)
3486
      result.append(("admin_state", constants.ADMINST_DOWN))
3487

    
3488
    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3489

    
3490
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3491
                self.owned_locks(locking.LEVEL_NODE)), \
3492
      "All node locks should have been released by now"
3493

    
3494
    return result
3495

    
3496
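  # Maps (current disk template, requested disk template) to the handler
  # performing that conversion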
  _DISK_CONVERSIONS = {
3497
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3498
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3499
    }
3500

    
3501

    
3502
class LUInstanceChangeGroup(LogicalUnit):
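  """Moves an instance to another node group.

  """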
  HPATH = "instance-change-group"
3504
  HTYPE = constants.HTYPE_INSTANCE
3505
  REQ_BGL = False
3506

    
3507
  def ExpandNames(self):
3508
    self.share_locks = ShareAll()
3509

    
3510
    self.needed_locks = {
3511
      locking.LEVEL_NODEGROUP: [],
3512
      locking.LEVEL_NODE: [],
3513
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3514
      }
3515

    
3516
    self._ExpandAndLockInstance()
3517

    
3518
    if self.op.target_groups:
3519
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3520
                                  self.op.target_groups)
3521
    else:
3522
      self.req_target_uuids = None
3523

    
3524
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3525

    
3526
  def DeclareLocks(self, level):
3527
    if level == locking.LEVEL_NODEGROUP:
3528
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3529

    
3530
      if self.req_target_uuids:
3531
        lock_groups = set(self.req_target_uuids)
3532

    
3533
        # Lock all groups used by instance optimistically; this requires going
3534
        # via the node before it's locked, requiring verification later on
3535
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3536
        lock_groups.update(instance_groups)
3537
      else:
3538
        # No target groups, need to lock all of them
3539
        lock_groups = locking.ALL_SET
3540

    
3541
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3542

    
3543
    elif level == locking.LEVEL_NODE:
3544
      if self.req_target_uuids:
3545
        # Lock all nodes used by instances
3546
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3547
        self._LockInstancesNodes()
3548

    
3549
        # Lock all nodes in all potential target groups
3550
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3551
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3552
        member_nodes = [node_name
3553
                        for group in lock_groups
3554
                        for node_name in self.cfg.GetNodeGroup(group).members]
3555
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3556
      else:
3557
        # Lock all nodes as all groups are potential targets
3558
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3559

    
3560
  def CheckPrereq(self):
3561
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3562
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3563
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3564

    
3565
    assert (self.req_target_uuids is None or
3566
            owned_groups.issuperset(self.req_target_uuids))
3567
    assert owned_instances == set([self.op.instance_name])
3568

    
3569
    # Get instance information
3570
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3571

    
3572
    # Check if node groups for locked instance are still correct
3573
    assert owned_nodes.issuperset(self.instance.all_nodes), \
3574
      ("Instance %s's nodes changed while we kept the lock" %
3575
       self.op.instance_name)
3576

    
3577
    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3578
                                          owned_groups)
3579

    
3580
    if self.req_target_uuids:
3581
      # User requested specific target groups
3582
      self.target_uuids = frozenset(self.req_target_uuids)
3583
    else:
3584
      # All groups except those used by the instance are potential targets
3585
      self.target_uuids = owned_groups - inst_groups
3586

    
3587
    conflicting_groups = self.target_uuids & inst_groups
3588
    if conflicting_groups:
3589
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3590
                                 " used by the instance '%s'" %
3591
                                 (utils.CommaJoin(conflicting_groups),
3592
                                  self.op.instance_name),
3593
                                 errors.ECODE_INVAL)
3594

    
3595
    if not self.target_uuids:
3596
      raise errors.OpPrereqError("There are no possible target groups",
3597
                                 errors.ECODE_INVAL)
3598

    
3599
  def BuildHooksEnv(self):
3600
    """Build hooks env.
3601

3602
    """
3603
    assert self.target_uuids
3604

    
3605
    env = {
3606
      "TARGET_GROUPS": " ".join(self.target_uuids),
3607
      }
3608

    
3609
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
3610

    
3611
    return env
3612

    
3613
  def BuildHooksNodes(self):
3614
    """Build hooks nodes.
3615

3616
    """
3617
    mn = self.cfg.GetMasterNode()
3618
    return ([mn], [mn])
3619

    
3620
  def Exec(self, feedback_fn):
3621
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3622

    
3623
    assert instances == [self.op.instance_name], "Instance not locked"
3624

    
3625
    req = iallocator.IAReqGroupChange(instances=instances,
3626
                                      target_groups=list(self.target_uuids))
3627
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3628

    
3629
    ial.Run(self.op.iallocator)
3630

    
3631
    if not ial.success:
3632
      raise errors.OpPrereqError("Can't compute solution for changing group of"
3633
                                 " instance '%s' using iallocator '%s': %s" %
3634
                                 (self.op.instance_name, self.op.iallocator,
3635
                                  ial.info), errors.ECODE_NORES)
3636

    
3637
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3638

    
3639
    self.LogInfo("Iallocator returned %s job(s) for changing group of"
3640
                 " instance '%s'", len(jobs), self.op.instance_name)
3641

    
3642
    return ResultWithJobs(jobs)