# lib/cmdlib/instance.py @ fc01b92b
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
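# Concretely, a value matching this description is either None or a list of
# two-element (description, payload) pairs, e.g. [("add", nic_object)] -- the
# example pair is illustrative only; the payloads are defined by the caller.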


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
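# Illustrative example (hypothetical names): name="web1" is accepted when the
# resolver returns "web1.example.com", since the given name is a full leading
# component of the result, while a name like "web" would be rejected.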


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
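# Illustrative example (hypothetical values): with op.beparams containing
# {"vcpus": "auto"} and a cluster default of vcpus=1, the "auto" value is
# replaced by 1 above before the dict is upgraded, type-checked and merged
# with the remaining cluster defaults.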


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built-up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
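# For reference, each entry of op.nics is a plain dict keyed by the INIC_*
# constants; illustratively, {constants.INIC_IP: "pool",
# constants.INIC_NETWORK: "net1"} (hypothetical network name) defers the
# actual address assignment to CheckPrereq, once the primary node and thus
# the node group are known.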


def _CheckForConflictingIp(lu, ip, node):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
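# The helper returns a (possibly empty) list of human-readable violation
# messages; an empty list means the given spec fits the instance policy.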


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
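# Illustrative example (hypothetical OS name): for an OS that declares
# variants, a user-supplied name such as "debootstrap+minimal" selects the
# "minimal" variant, while a bare "debootstrap" is rejected because a variant
# must be given.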


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt
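    # From this point on, self.adopt_disks being True means every disk entry
    # carries an "adopt" key, i.e. existing volumes/devices are reused instead
    # of new ones being created.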

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_DEFAULT

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice but to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      self.op.snode = ial.result[1]
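    # At this point self.op.pnode (and self.op.snode for mirrored templates)
    # hold the allocator's choice and are treated exactly like user-supplied
    # nodes by the remaining checks.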

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
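    # env starts with the creation-specific values (ADD_MODE plus the SRC_*
    # entries for imports below); the generic instance description is merged
    # in afterwards via BuildInstanceHookEnv.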
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if (int(ei_version) != constants.EXPORT_VERSION):
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
789
          if constants.BE_MAXMEM not in self.op.beparams:
790
            self.op.beparams[constants.BE_MAXMEM] = value
791
          if constants.BE_MINMEM not in self.op.beparams:
792
            self.op.beparams[constants.BE_MINMEM] = value
793
    else:
794
      # try to read the parameters old style, from the main section
795
      for name in constants.BES_PARAMETERS:
796
        if (name not in self.op.beparams and
797
            einfo.has_option(constants.INISECT_INS, name)):
798
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
799

    
800
    if einfo.has_section(constants.INISECT_OSP):
801
      # use the parameters, without overriding
802
      for name, value in einfo.items(constants.INISECT_OSP):
803
        if name not in self.op.osparams:
804
          self.op.osparams[name] = value
805

    
806
  def _RevertToDefaults(self, cluster):
807
    """Revert the instance parameters to the default values.
808

809
    """
810
    # hvparams
811
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
812
    for name in self.op.hvparams.keys():
813
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
814
        del self.op.hvparams[name]
815
    # beparams
816
    be_defs = cluster.SimpleFillBE({})
817
    for name in self.op.beparams.keys():
818
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
819
        del self.op.beparams[name]
820
    # nic params
821
    nic_defs = cluster.SimpleFillNIC({})
822
    for nic in self.op.nics:
823
      for name in constants.NICS_PARAMETERS:
824
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
825
          del nic[name]
826
    # osparams
827
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
828
    for name in self.op.osparams.keys():
829
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
830
        del self.op.osparams[name]
831

    
832
  def _CalculateFileStorageDir(self):
833
    """Calculate final instance file storage dir.
834

835
    """
836
    # file storage dir calculation/check
837
    self.instance_file_storage_dir = None
838
    if self.op.disk_template in constants.DTS_FILEBASED:
839
      # build the full file storage dir path
840
      joinargs = []
841

    
842
      if self.op.disk_template == constants.DT_SHARED_FILE:
843
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
844
      else:
845
        get_fsd_fn = self.cfg.GetFileStorageDir
846

    
847
      cfg_storagedir = get_fsd_fn()
848
      if not cfg_storagedir:
849
        raise errors.OpPrereqError("Cluster file storage dir not defined",
850
                                   errors.ECODE_STATE)
851
      joinargs.append(cfg_storagedir)
852

    
853
      if self.op.file_storage_dir is not None:
854
        joinargs.append(self.op.file_storage_dir)
855

    
856
      joinargs.append(self.op.instance_name)
857

    
858
      # pylint: disable=W0142
859
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
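      # Illustrative result (hypothetical values): a cluster dir of
      # "/srv/ganeti/file-storage", an optional user subdir "web" and instance
      # "inst1.example.com" yield
      # "/srv/ganeti/file-storage/web/inst1.example.com".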

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node %s's nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      nodes = [pnode]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(snode)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        raise errors.OpPrereqError("Disk template %s not supported with"
                                   " exclusive storage" % self.op.disk_template,
                                   errors.ECODE_STATE)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      else:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
          # to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }
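    # ispec mirrors the fields checked by the instance policy (memory, vcpus,
    # disk count/sizes, NIC count, spindle use); values left as None are
    # simply not constrained by the policy check below.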

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )
1257

    
1258
    if self.op.tags:
1259
      for tag in self.op.tags:
1260
        iobj.AddTag(tag)
1261

    
1262
    if self.adopt_disks:
1263
      if self.op.disk_template == constants.DT_PLAIN:
1264
        # rename LVs to the newly-generated names; we need to construct
1265
        # 'fake' LV disks with the old data, plus the new unique_id
1266
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1267
        rename_to = []
1268
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1269
          rename_to.append(t_dsk.logical_id)
1270
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1271
          self.cfg.SetDiskID(t_dsk, pnode_name)
1272
        result = self.rpc.call_blockdev_rename(pnode_name,
1273
                                               zip(tmp_disks, rename_to))
1274
        result.Raise("Failed to rename adoped LVs")
1275
    else:
1276
      feedback_fn("* creating instance disks...")
1277
      try:
1278
        CreateDisks(self, iobj)
1279
      except errors.OpExecError:
1280
        self.LogWarning("Device creation failed")
1281
        self.cfg.ReleaseDRBDMinors(instance)
1282
        raise
1283

    
1284
    feedback_fn("adding instance %s to cluster config" % instance)
1285

    
1286
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1287

    
1288
    # Declare that we don't want to remove the instance lock anymore, as we've
1289
    # added the instance to the config
1290
    del self.remove_locks[locking.LEVEL_INSTANCE]
1291

    
1292
    if self.op.mode == constants.INSTANCE_IMPORT:
1293
      # Release unused nodes
1294
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1295
    else:
1296
      # Release all nodes
1297
      ReleaseLocks(self, locking.LEVEL_NODE)
1298

    
1299
    disk_abort = False
1300
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1301
      feedback_fn("* wiping instance disks...")
1302
      try:
1303
        WipeDisks(self, iobj)
1304
      except errors.OpExecError, err:
1305
        logging.exception("Wiping disks failed")
1306
        self.LogWarning("Wiping instance disks failed (%s)", err)
1307
        disk_abort = True
1308

    
1309
    if disk_abort:
1310
      # Something is already wrong with the disks, don't do anything else
1311
      pass
1312
    elif self.op.wait_for_sync:
1313
      disk_abort = not WaitForSync(self, iobj)
1314
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1315
      # make sure the disks are not degraded (still sync-ing is ok)
1316
      feedback_fn("* checking mirrors status")
1317
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1318
    else:
1319
      disk_abort = False
1320

    
1321
    if disk_abort:
1322
      RemoveDisks(self, iobj)
1323
      self.cfg.RemoveInstance(iobj.name)
1324
      # Make sure the instance lock gets removed
1325
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1326
      raise errors.OpExecError("There are some degraded disks for"
1327
                               " this instance")
1328

    
1329
    # instance disks are now active
1330
    iobj.disks_active = True
1331

    
1332
    # Release all node resource locks
1333
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1334

    
1335
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1336
      # we need to set the disks ID to the primary node, since the
1337
      # preceding code might or might have not done it, depending on
1338
      # disk template and other options
1339
      for disk in iobj.disks:
1340
        self.cfg.SetDiskID(disk, pnode_name)
1341
      if self.op.mode == constants.INSTANCE_CREATE:
1342
        if not self.op.no_install:
1343
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1344
                        not self.op.wait_for_sync)
1345
          if pause_sync:
1346
            feedback_fn("* pausing disk sync to install instance OS")
1347
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1348
                                                              (iobj.disks,
1349
                                                               iobj), True)
1350
            for idx, success in enumerate(result.payload):
1351
              if not success:
1352
                logging.warn("pause-sync of instance %s for disk %d failed",
1353
                             instance, idx)
1354

    
1355
          feedback_fn("* running the instance OS create scripts...")
1356
          # FIXME: pass debug option from opcode to backend
1357
          os_add_result = \
1358
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1359
                                          self.op.debug_level)
1360
          if pause_sync:
1361
            feedback_fn("* resuming disk sync")
1362
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1363
                                                              (iobj.disks,
1364
                                                               iobj), False)
1365
            for idx, success in enumerate(result.payload):
1366
              if not success:
1367
                logging.warn("resume-sync of instance %s for disk %d failed",
1368
                             instance, idx)
1369

    
1370
          os_add_result.Raise("Could not add os for instance %s"
1371
                              " on node %s" % (instance, pnode_name))
1372

    
1373
      else:
1374
        if self.op.mode == constants.INSTANCE_IMPORT:
1375
          feedback_fn("* running the instance OS import scripts...")
1376

    
1377
          transfers = []
1378

    
1379
          for idx, image in enumerate(self.src_images):
1380
            if not image:
1381
              continue
1382

    
1383
            # FIXME: pass debug option from opcode to backend
1384
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1385
                                               constants.IEIO_FILE, (image, ),
1386
                                               constants.IEIO_SCRIPT,
1387
                                               (iobj.disks[idx], idx),
1388
                                               None)
1389
            transfers.append(dt)
1390

    
1391
          import_result = \
1392
            masterd.instance.TransferInstanceData(self, feedback_fn,
1393
                                                  self.op.src_node, pnode_name,
1394
                                                  self.pnode.secondary_ip,
1395
                                                  iobj, transfers)
1396
          if not compat.all(import_result):
1397
            self.LogWarning("Some disks for instance %s on node %s were not"
1398
                            " imported successfully" % (instance, pnode_name))
1399

    
1400
          rename_from = self._old_instance_name
1401

    
1402
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1403
          feedback_fn("* preparing remote import...")
1404
          # The source cluster will stop the instance before attempting to make
1405
          # a connection. In some cases stopping an instance can take a long
1406
          # time, hence the shutdown timeout is added to the connection
1407
          # timeout.
1408
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1409
                             self.op.source_shutdown_timeout)
1410
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1411

    
1412
          assert iobj.primary_node == self.pnode.name
1413
          disk_results = \
1414
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1415
                                          self.source_x509_ca,
1416
                                          self._cds, timeouts)
1417
          if not compat.all(disk_results):
1418
            # TODO: Should the instance still be started, even if some disks
1419
            # failed to import (valid for local imports, too)?
1420
            self.LogWarning("Some disks for instance %s on node %s were not"
1421
                            " imported successfully" % (instance, pnode_name))
1422

    
1423
          rename_from = self.source_instance_name
1424

    
1425
        else:
1426
          # also checked in the prereq part
1427
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1428
                                       % self.op.mode)
1429

    
1430
        # Run rename script on newly imported instance
1431
        assert iobj.name == instance
1432
        feedback_fn("Running rename script for %s" % instance)
1433
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1434
                                                   rename_from,
1435
                                                   self.op.debug_level)
1436
        if result.fail_msg:
1437
          self.LogWarning("Failed to run rename script for %s on node"
1438
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
1439

    
1440
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1441

    
1442
    if self.op.start:
1443
      iobj.admin_state = constants.ADMINST_UP
1444
      self.cfg.Update(iobj, feedback_fn)
1445
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1446
      feedback_fn("* starting instance...")
1447
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1448
                                            False, self.op.reason)
1449
      result.Raise("Could not start instance")
1450

    
1451
    return list(iobj.all_nodes)
1452

    
1453

    
1454
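# Illustrative usage sketch (assumptions, not part of this module): creation
# is normally requested by submitting an OpInstanceCreate opcode (via
# "gnt-instance add" or RAPI), which is executed by the LU above. A minimal,
# hypothetical opcode could look roughly like:
#
#   op = opcodes.OpInstanceCreate(instance_name="inst1.example.com",
#                                 mode=constants.INSTANCE_CREATE,
#                                 disk_template=constants.DT_PLAIN,
#                                 disks=[{constants.IDISK_SIZE: 10240}],
#                                 nics=[{}],
#                                 os_type="debian-image",
#                                 pnode="node1.example.com")
#
# Exec() then generates the disk objects, adds the instance to the
# configuration, optionally wipes and syncs the disks, runs the OS create or
# import scripts, and finally starts the instance if requested.
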
class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)
    # update info on disks
    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name


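# Illustrative note (assumption, not part of this module): a rename is
# normally driven by an OpInstanceRename opcode, e.g.:
#
#   op = opcodes.OpInstanceRename(instance_name="old.example.com",
#                                 new_name="new.example.com",
#                                 name_check=True, ip_check=False)
#
# The LU above renames the instance in the configuration first, then renames
# any file-based storage directory, and finally runs the OS rename script on
# the primary node, only warning (not failing) if that script fails.
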
class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


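# Illustrative sketch (assumption): removal is a shutdown followed by
# RemoveInstance(), which tears down the disks and drops the instance from
# the configuration. A hypothetical opcode submission:
#
#   op = opcodes.OpInstanceRemove(instance_name="inst1.example.com",
#                                 shutdown_timeout=120,
#                                 ignore_failures=False)
#
# With ignore_failures=True a failed shutdown is only reported via
# feedback_fn and the removal proceeds anyway.
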
class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 instance.disk_template, errors.ECODE_STATE)

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node)
    CheckNodeNotDrained(self, target_node)
    CheckNodeVmCapable(self, target_node)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(self, target_node,
                          "failing over instance %s" %
                          instance.name, bep[constants.BE_MAXMEM],
                          instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.LogWarning("Could not shutdown instance %s on node %s."
                        " Proceeding anyway. Please make sure node"
                        " %s is down. Error details: %s",
                        instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path, _ = result.payload
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = AssembleInstanceDisks(self, instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node,
                                            (instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


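# Illustrative sketch (assumption): a move copies raw disk data between
# nodes, so CheckPrereq above restricts it to plain-LV and file-based disks.
# A hypothetical invocation:
#
#   op = opcodes.OpInstanceMove(instance_name="inst1.example.com",
#                               target_node="node2.example.com",
#                               shutdown_timeout=120)
#
# The flow is: shut down on the source node, create empty disks on the
# target, assemble and export each disk over the network, switch
# primary_node in the configuration, remove the old disks, and restart the
# instance on the target if it was marked up.
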
class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " individual instance objects",
                                   errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
        nodeslist.append(inst.pnode)
        if inst.snode is not None:
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
          nodeslist.append(inst.snode)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
      else:
        node_whitelist = None

      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                           _ComputeNics(op, cluster, None,
                                                        self.cfg, ec_id),
                                           _ComputeFullBeParams(op, cluster),
                                           node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, nodes) in allocatable:
        op = op2inst.pop(name)

        if len(nodes) > 1:
          (op.pnode, op.snode) = nodes
        else:
          (op.pnode,) = nodes

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator returned an incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


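# Illustrative sketch (assumption): _ConstructPartialResult() above returns a
# plain dict keyed by the opcode's result constants, e.g. roughly:
#
#   {opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: ["inst1", "inst2"],
#    opcodes.OpInstanceMultiAlloc.FAILED_KEY: ["inst3"]}
#
# Exec() wraps it, together with one single-opcode job per allocatable
# instance, in a ResultWithJobs, so the actual creations run as separate jobs.
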
class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]


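# Illustrative example (assumption): _PrepareContainerMods() only tacks a
# per-modification private object onto each (op, index, params) tuple, e.g.:
#
#   mods = [(constants.DDM_ADD, -1, {constants.INIC_MAC: "auto"})]
#   prepared = _PrepareContainerMods(mods, _InstNicModPrivate)
#   # -> [(DDM_ADD, -1, {...}, <_InstNicModPrivate instance>)]
#
# The private object is later filled in by the callbacks passed to
# _ApplyContainerMods() below.
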
def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node, num_cpus, requested),
                                 errors.ECODE_NORES)


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)


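# Illustrative example (assumption): identifiers are resolved first as
# integer indices, then by name or UUID. With a container of two NIC objects
# named "nic-a" and "nic-b":
#
#   GetItemFromContainer("0", "nic", nics)      # -> (0, <nic "nic-a">)
#   GetItemFromContainer("-1", "nic", nics)     # -> (1, <nic "nic-b">),
#                                               #    i.e. -1 means "last item"
#   GetItemFromContainer("nic-b", "nic", nics)  # -> (1, <nic "nic-b">)
#
# Anything matching neither an index nor a name/UUID raises OpPrereqError
# with ECODE_NOENT.
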
def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        changes = [("%s/%s" % (kind, absidx), "remove")]

        if remove_fn is not None:
          msg = remove_fn(absidx, item, private)
          if msg:
            changes.append(("%s/%s" % (kind, absidx), msg))

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)


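# Illustrative usage sketch (assumption): with the create/modify/remove
# callbacks set to None, _ApplyContainerMods() degenerates to plain list
# surgery, which makes the semantics easy to see:
#
#   container = ["a", "b"]
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_ADD, -1, "c"),
#                                 (constants.DDM_REMOVE, "0", None)], None)
#   _ApplyContainerMods("item", container, chgdesc, mods, None, None, None)
#   # container is now ["b", "c"]; chgdesc records ("item/0", "remove")
#
# In LUInstanceSetParams the callbacks create and modify real NIC/Disk
# objects and return the change descriptions that are shown to the user.
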
def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result

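  # Illustrative example (assumption): _UpgradeDiskNicMods() above converts
  # the legacy 2-tuple mod format into the indexed 3-tuple format used
  # internally, e.g.:
  #
  #   [(constants.DDM_ADD, {"size": 1024})]
  #     -> [(constants.DDM_ADD, -1, {"size": 1024})]
  #   [(0, {"mode": "rw"})]
  #     -> [(constants.DDM_MODIFY, 0, {"mode": "rw"})]
  #
  # Lists already in the 3-tuple format are passed through unchanged.
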
  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  def _VerifyDiskModification(self, op, params):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)

      try:
        size = int(size)
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
                                   errors.ECODE_INVAL)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)

      # Disk modification supports changing only the disk name and mode.
      # Changing arbitrary parameters is allowed only for ext disk templates.
      if self.instance.disk_template != constants.DT_EXT:
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)

      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

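  # Illustrative example (assumption): the disk modifications validated by
  # _VerifyDiskModification() above are (op, identifier, params) entries such
  # as:
  #
  #   (constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})      # MiB
  #   (constants.DDM_MODIFY, 0, {constants.IDISK_MODE: constants.DISK_RDONLY})
  #
  # Size changes are rejected for DDM_MODIFY; growing an existing disk is
  # done through the separate grow-disk operation instead.
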
  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If network is given, mode or link"
                                     " should not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)

  def ExpandNames(self):
2410
    self._ExpandAndLockInstance()
2411
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2412
    # Can't even acquire node locks in shared mode as upcoming changes in
2413
    # Ganeti 2.6 will start to modify the node object on disk conversion
2414
    self.needed_locks[locking.LEVEL_NODE] = []
2415
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2416
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2417
    # Look node group to look up the ipolicy
2418
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2419

    
2420
  def DeclareLocks(self, level):
2421
    if level == locking.LEVEL_NODEGROUP:
2422
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2423
      # Acquire locks for the instance's nodegroups optimistically. Needs
2424
      # to be verified in CheckPrereq
2425
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2426
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2427
    elif level == locking.LEVEL_NODE:
2428
      self._LockInstancesNodes()
2429
      if self.op.disk_template and self.op.remote_node:
2430
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2431
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2432
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2433
      # Copy node locks
2434
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2435
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2436

    
2437
  def BuildHooksEnv(self):
2438
    """Build hooks env.
2439

2440
    This runs on the master, primary and secondaries.
2441

2442
    """
2443
    args = {}
2444
    if constants.BE_MINMEM in self.be_new:
2445
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2446
    if constants.BE_MAXMEM in self.be_new:
2447
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2448
    if constants.BE_VCPUS in self.be_new:
2449
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2450
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2451
    # information at all.
2452

    
2453
    if self._new_nics is not None:
2454
      nics = []
2455

    
2456
      for nic in self._new_nics:
2457
        n = copy.deepcopy(nic)
2458
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2459
        n.nicparams = nicparams
2460
        nics.append(NICToTuple(self, n))
2461

    
2462
      args["nics"] = nics
2463

    
2464
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2465
    if self.op.disk_template:
2466
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2467
    if self.op.runtime_mem:
2468
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2469

    
2470
    return env
2471

    
2472
  def BuildHooksNodes(self):
2473
    """Build hooks nodes.
2474

2475
    """
2476
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2477
    return (nl, nl)
2478

    
2479
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    instance = self.instance
    pnode = instance.primary_node
    cluster = self.cluster
    if instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 instance.disk_template, errors.ECODE_INVAL)

    if (instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node == pnode:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node)
      CheckNodeNotDrained(self, self.op.remote_node)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  # too many local variables
  # pylint: disable=R0914
  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode = instance.primary_node

    self.warn = []

    if (self.op.pnode is not None and self.op.pnode != pnode and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
                                   errors.ECODE_STATE)

    assert pnode in self.owned_locks(locking.LEVEL_NODE)
    nodelist = list(instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode)
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    if instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {},
                      self._VerifyDiskModification)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      self._VerifyDiskModification)

    # Prepare disk/NIC modifications
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # Check the validity of the `provider' parameter
    if instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.hotplug or self.op.hotplug_if_possible:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      if result.fail_msg:
        if self.op.hotplug:
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
                       prereq=True)
        else:
          self.LogWarning(result.fail_msg)
          self.op.hotplug = False
          self.LogInfo("Modification will take place without hotplugging.")
      else:
        self.op.hotplug = True

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
                                              instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
    be_old = cluster.FillBE(instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
                                max_requested_cpu + 1, instance.hypervisor)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         [instance.hypervisor], False)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" % pnode)
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" % node,
                     prereq=True, ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" % node,
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" % node,
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node)
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(self, instance.primary_node,
                            "ballooning memory for instance %s" %
                            instance.name, delta, instance.hypervisor)

    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

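    # Local callbacks for _ApplyContainerMods below; they validate NIC
    # additions, modifications and removals while operating on a copy of
    # the instance's NIC list.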
    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

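    # Callback for _ApplyContainerMods below: pre-apply disk name changes on
    # the copied disk objects so that the resulting names can be validated.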
    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    assert instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     instance.name, pnode, [snode],
                                     disk_info, None, None, 0, feedback_fn,
                                     self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
    info = GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode, instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
          f_create = node == pnode
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
                               excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance

    assert len(instance.secondary_nodes) == 1
    assert instance.disk_template == constants.DT_DRBD8

    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
    new_disks = [d.children[0] for d in instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, instance.disks)
    self.cfg.Update(instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)

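  # Helper issuing the hotplug RPC on the primary node; it returns a short
  # "hotplug:done"/"hotplug:failed" status string that callers append to the
  # change descriptions.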
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    instance = self.instance

    # add a new disk
    if instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
                           instance.primary_node, instance.secondary_nodes,
                           [params], file_path, file_driver, idx,
                           self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
      "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      self.cfg.SetDiskID(disk, self.instance.primary_node)
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _ModifyDisk(self, idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    if constants.IDISK_MODE in params:
      disk.mode = params.get(constants.IDISK_MODE)
      changes.append(("disk.mode/%d" % idx, disk.mode))

    if constants.IDISK_NAME in params:
      disk.name = params.get(constants.IDISK_NAME)
      changes.append(("disk.name/%d" % idx, disk.name))

    # Modify arbitrary params in case instance template is ext
    for key, value in params.iteritems():
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
          self.instance.disk_template == constants.DT_EXT):
        # stolen from GetUpdatedParams: default means reset/delete
        if value.lower() == constants.VALUE_DEFAULT:
          try:
            del disk.params[key]
          except KeyError:
            pass
        else:
          disk.params[key] = value
        changes.append(("disk.params:%s/%d" % (key, idx), value))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
      self.cfg.SetDiskID(disk, node)
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx, node, msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

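  # The NIC itself is dropped from the NIC list by _ApplyContainerMods; if
  # hotplugging was requested, detach the device from the running instance
  # and report the outcome in the change description.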
  def _RemoveNic(self, idx, nic, _):
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []
    instance = self.instance

    # New primary node
    if self.op.pnode:
      instance.primary_node = self.op.pnode

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
                                                     instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(instance.all_nodes)
        if self.op.remote_node:
          check_nodes.add(self.op.remote_node)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

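  # Supported disk template conversions: (old template, new template) pairs
  # mapped to the method implementing the conversion; only plain <-> drbd8
  # is handled.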
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


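# Logical unit changing the node group of an instance; the actual moves are
# computed by the instance allocator and returned as jobs.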
class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_name
                        for group in lock_groups
                        for node_name in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

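  # Verify that the locks acquired optimistically in DeclareLocks are still
  # valid and compute the set of candidate target groups.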
  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

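  # Ask the instance allocator for a solution and hand the resulting jobs
  # over to the job queue via ResultWithJobs.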
  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)