# lib/cmdlib/instance.py @ revision 4c6e8e1a
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
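# Illustrative note (not from the original source): any list of two-element
# (non-empty string, anything) pairs satisfies the type above, for example
# [("mac", "aa:00:00:12:34:56"), ("ip", None)].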


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
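# Illustrative note (not from the original source): if op.beparams maps, say,
# BE_VCPUS to VALUE_AUTO, the cluster-wide default for that parameter is
# substituted above before the dict is type-checked and merged with the
# cluster defaults via SimpleFillBE().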


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
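# Illustrative note (not from the original source): an op.nics entry might be
# {"ip": "pool", "network": "vlan100"} (the IP is drawn from the network's
# pool later) or {"mode": "bridged", "link": "br0"}; a network may not be
# combined with an explicit mode or link, as checked above.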


def _CheckForConflictingIp(lu, ip, node):
  """Raise an error in case of a conflicting IP address.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_DEFAULT

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice but to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      self.op.snode = ial.result[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if (int(ei_version) != constants.EXPORT_VERSION):
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      nodes = [pnode]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(snode)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        raise errors.OpPrereqError("Disk template %s not supported with"
                                   " exclusive storage" % self.op.disk_template,
                                   errors.ECODE_STATE)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      else:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }
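    # Illustrative note (not from the original source): for a 2-disk, 1-NIC
    # instance this spec could carry e.g. ISPEC_MEM_SIZE=512,
    # ISPEC_CPU_COUNT=1, ISPEC_DISK_COUNT=2, ISPEC_DISK_SIZE=[10240, 2048],
    # ISPEC_NIC_COUNT=1, which is what the ipolicy check below receives.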

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
1323
      # Make sure the instance lock gets removed
1324
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1325
      raise errors.OpExecError("There are some degraded disks for"
1326
                               " this instance")
1327

    
1328
    # instance disks are now active
1329
    iobj.disks_active = True
1330

    
1331
    # Release all node resource locks
1332
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1333

    
1334
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1335
      # we need to set the disks ID to the primary node, since the
1336
      # preceding code might or might have not done it, depending on
1337
      # disk template and other options
1338
      for disk in iobj.disks:
1339
        self.cfg.SetDiskID(disk, pnode_name)
1340
      if self.op.mode == constants.INSTANCE_CREATE:
1341
        if not self.op.no_install:
1342
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1343
                        not self.op.wait_for_sync)
1344
          if pause_sync:
1345
            feedback_fn("* pausing disk sync to install instance OS")
1346
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1347
                                                              (iobj.disks,
1348
                                                               iobj), True)
1349
            for idx, success in enumerate(result.payload):
1350
              if not success:
1351
                logging.warn("pause-sync of instance %s for disk %d failed",
1352
                             instance, idx)
1353

    
1354
          feedback_fn("* running the instance OS create scripts...")
1355
          # FIXME: pass debug option from opcode to backend
1356
          os_add_result = \
1357
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1358
                                          self.op.debug_level)
1359
          if pause_sync:
1360
            feedback_fn("* resuming disk sync")
1361
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1362
                                                              (iobj.disks,
1363
                                                               iobj), False)
1364
            for idx, success in enumerate(result.payload):
1365
              if not success:
1366
                logging.warn("resume-sync of instance %s for disk %d failed",
1367
                             instance, idx)
1368

    
1369
          os_add_result.Raise("Could not add os for instance %s"
1370
                              " on node %s" % (instance, pnode_name))
1371

    
1372
      else:
1373
        if self.op.mode == constants.INSTANCE_IMPORT:
1374
          feedback_fn("* running the instance OS import scripts...")
1375

    
1376
          transfers = []
1377

    
1378
          for idx, image in enumerate(self.src_images):
1379
            if not image:
1380
              continue
1381

    
1382
            # FIXME: pass debug option from opcode to backend
1383
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1384
                                               constants.IEIO_FILE, (image, ),
1385
                                               constants.IEIO_SCRIPT,
1386
                                               (iobj.disks[idx], idx),
1387
                                               None)
1388
            transfers.append(dt)
1389

    
1390
          import_result = \
1391
            masterd.instance.TransferInstanceData(self, feedback_fn,
1392
                                                  self.op.src_node, pnode_name,
1393
                                                  self.pnode.secondary_ip,
1394
                                                  iobj, transfers)
1395
          if not compat.all(import_result):
1396
            self.LogWarning("Some disks for instance %s on node %s were not"
1397
                            " imported successfully" % (instance, pnode_name))
1398

    
1399
          rename_from = self._old_instance_name
1400

    
1401
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1402
          feedback_fn("* preparing remote import...")
1403
          # The source cluster will stop the instance before attempting to make
1404
          # a connection. In some cases stopping an instance can take a long
1405
          # time, hence the shutdown timeout is added to the connection
1406
          # timeout.
1407
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1408
                             self.op.source_shutdown_timeout)
1409
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1410

    
1411
          assert iobj.primary_node == self.pnode.name
1412
          disk_results = \
1413
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1414
                                          self.source_x509_ca,
1415
                                          self._cds, timeouts)
1416
          if not compat.all(disk_results):
1417
            # TODO: Should the instance still be started, even if some disks
1418
            # failed to import (valid for local imports, too)?
1419
            self.LogWarning("Some disks for instance %s on node %s were not"
1420
                            " imported successfully" % (instance, pnode_name))
1421

    
1422
          rename_from = self.source_instance_name
1423

    
1424
        else:
1425
          # also checked in the prereq part
1426
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1427
                                       % self.op.mode)
1428

    
1429
        # Run rename script on newly imported instance
1430
        assert iobj.name == instance
1431
        feedback_fn("Running rename script for %s" % instance)
1432
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1433
                                                   rename_from,
1434
                                                   self.op.debug_level)
1435
        if result.fail_msg:
1436
          self.LogWarning("Failed to run rename script for %s on node"
1437
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
1438

    
1439
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1440

    
1441
    if self.op.start:
1442
      iobj.admin_state = constants.ADMINST_UP
1443
      self.cfg.Update(iobj, feedback_fn)
1444
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1445
      feedback_fn("* starting instance...")
1446
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1447
                                            False, self.op.reason)
1448
      result.Raise("Could not start instance")
1449

    
1450
    return list(iobj.all_nodes)
1451

    
1452

    
1453
class LUInstanceRename(LogicalUnit):
1454
  """Rename an instance.
1455

1456
  """
1457
  HPATH = "instance-rename"
1458
  HTYPE = constants.HTYPE_INSTANCE
1459

    
1460
  def CheckArguments(self):
1461
    """Check arguments.
1462

1463
    """
1464
    if self.op.ip_check and not self.op.name_check:
1465
      # TODO: make the ip check more flexible and not depend on the name check
1466
      raise errors.OpPrereqError("IP address check requires a name check",
1467
                                 errors.ECODE_INVAL)
1468

    
1469
  def BuildHooksEnv(self):
1470
    """Build hooks env.
1471

1472
    This runs on master, primary and secondary nodes of the instance.
1473

1474
    """
1475
    env = BuildInstanceHookEnvByObject(self, self.instance)
1476
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1477
    return env
1478

    
1479
  def BuildHooksNodes(self):
1480
    """Build hooks nodes.
1481

1482
    """
1483
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1484
    return (nl, nl)
1485

    
1486
  def CheckPrereq(self):
1487
    """Check prerequisites.
1488

1489
    This checks that the instance is in the cluster and is not running.
1490

1491
    """
1492
    self.op.instance_name = ExpandInstanceName(self.cfg,
1493
                                               self.op.instance_name)
1494
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1495
    assert instance is not None
1496
    CheckNodeOnline(self, instance.primary_node)
1497
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1498
                       msg="cannot rename")
1499
    self.instance = instance
1500

    
1501
    new_name = self.op.new_name
1502
    if self.op.name_check:
1503
      hostname = _CheckHostnameSane(self, new_name)
1504
      new_name = self.op.new_name = hostname.name
1505
      if (self.op.ip_check and
1506
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1507
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1508
                                   (hostname.ip, new_name),
1509
                                   errors.ECODE_NOTUNIQUE)
1510

    
1511
    instance_list = self.cfg.GetInstanceList()
1512
    if new_name in instance_list and new_name != instance.name:
1513
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1514
                                 new_name, errors.ECODE_EXISTS)
1515

    
1516
  def Exec(self, feedback_fn):
1517
    """Rename the instance.
1518

1519
    """
1520
    inst = self.instance
1521
    old_name = inst.name
1522

    
1523
    rename_file_storage = False
1524
    if (inst.disk_template in constants.DTS_FILEBASED and
1525
        self.op.new_name != inst.name):
1526
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1527
      rename_file_storage = True
1528

    
1529
    self.cfg.RenameInstance(inst.name, self.op.new_name)
1530
    # Change the instance lock. This is definitely safe while we hold the BGL.
1531
    # Otherwise the new lock would have to be added in acquired mode.
1532
    assert self.REQ_BGL
1533
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1534
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1535
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1536

    
1537
    # re-read the instance from the configuration after rename
1538
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
1539

    
1540
    if rename_file_storage:
1541
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1542
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1543
                                                     old_file_storage_dir,
1544
                                                     new_file_storage_dir)
1545
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1546
                   " (but the instance has been renamed in Ganeti)" %
1547
                   (inst.primary_node, old_file_storage_dir,
1548
                    new_file_storage_dir))
1549

    
1550
    StartInstanceDisks(self, inst, None)
1551
    # update info on disks
1552
    info = GetInstanceInfoText(inst)
1553
    for (idx, disk) in enumerate(inst.disks):
1554
      for node in inst.all_nodes:
1555
        self.cfg.SetDiskID(disk, node)
1556
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
1557
        if result.fail_msg:
1558
          self.LogWarning("Error setting info on node %s for disk %s: %s",
1559
                          node, idx, result.fail_msg)
1560
    try:
1561
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1562
                                                 old_name, self.op.debug_level)
1563
      msg = result.fail_msg
1564
      if msg:
1565
        msg = ("Could not run OS rename script for instance %s on node %s"
1566
               " (but the instance has been renamed in Ganeti): %s" %
1567
               (inst.name, inst.primary_node, msg))
1568
        self.LogWarning(msg)
1569
    finally:
1570
      ShutdownInstanceDisks(self, inst)
1571

    
1572
    return inst.name
1573

    
1574

    
1575
class LUInstanceRemove(LogicalUnit):
1576
  """Remove an instance.
1577

1578
  """
1579
  HPATH = "instance-remove"
1580
  HTYPE = constants.HTYPE_INSTANCE
1581
  REQ_BGL = False
1582

    
1583
  def ExpandNames(self):
1584
    self._ExpandAndLockInstance()
1585
    self.needed_locks[locking.LEVEL_NODE] = []
1586
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1587
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1588

    
1589
  def DeclareLocks(self, level):
1590
    if level == locking.LEVEL_NODE:
1591
      self._LockInstancesNodes()
1592
    elif level == locking.LEVEL_NODE_RES:
1593
      # Copy node locks
1594
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1595
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1596

    
1597
  def BuildHooksEnv(self):
1598
    """Build hooks env.
1599

1600
    This runs on master, primary and secondary nodes of the instance.
1601

1602
    """
1603
    env = BuildInstanceHookEnvByObject(self, self.instance)
1604
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1605
    return env
1606

    
1607
  def BuildHooksNodes(self):
1608
    """Build hooks nodes.
1609

1610
    """
1611
    nl = [self.cfg.GetMasterNode()]
1612
    nl_post = list(self.instance.all_nodes) + nl
1613
    return (nl, nl_post)
1614

    
1615
  def CheckPrereq(self):
1616
    """Check prerequisites.
1617

1618
    This checks that the instance is in the cluster.
1619

1620
    """
1621
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1622
    assert self.instance is not None, \
1623
      "Cannot retrieve locked instance %s" % self.op.instance_name
1624

    
1625
  def Exec(self, feedback_fn):
1626
    """Remove the instance.
1627

1628
    """
1629
    instance = self.instance
1630
    logging.info("Shutting down instance %s on node %s",
1631
                 instance.name, instance.primary_node)
1632

    
1633
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1634
                                             self.op.shutdown_timeout,
1635
                                             self.op.reason)
1636
    msg = result.fail_msg
1637
    if msg:
1638
      if self.op.ignore_failures:
1639
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
1640
      else:
1641
        raise errors.OpExecError("Could not shutdown instance %s on"
1642
                                 " node %s: %s" %
1643
                                 (instance.name, instance.primary_node, msg))
1644

    
1645
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1646
            self.owned_locks(locking.LEVEL_NODE_RES))
1647
    assert not (set(instance.all_nodes) -
1648
                self.owned_locks(locking.LEVEL_NODE)), \
1649
      "Not owning correct locks"
1650

    
1651
    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1652

    
1653

    
1654
class LUInstanceMove(LogicalUnit):
1655
  """Move an instance by data-copying.
1656

1657
  """
1658
  HPATH = "instance-move"
1659
  HTYPE = constants.HTYPE_INSTANCE
1660
  REQ_BGL = False
1661

    
1662
  def ExpandNames(self):
1663
    self._ExpandAndLockInstance()
1664
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
1665
    self.op.target_node = target_node
1666
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
1667
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1668
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1669

    
1670
  def DeclareLocks(self, level):
1671
    if level == locking.LEVEL_NODE:
1672
      self._LockInstancesNodes(primary_only=True)
1673
    elif level == locking.LEVEL_NODE_RES:
1674
      # Copy node locks
1675
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1676
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1677

    
1678
  def BuildHooksEnv(self):
1679
    """Build hooks env.
1680

1681
    This runs on master, primary and secondary nodes of the instance.
1682

1683
    """
1684
    env = {
1685
      "TARGET_NODE": self.op.target_node,
1686
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1687
      }
1688
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1689
    return env
1690

    
1691
  def BuildHooksNodes(self):
1692
    """Build hooks nodes.
1693

1694
    """
1695
    nl = [
1696
      self.cfg.GetMasterNode(),
1697
      self.instance.primary_node,
1698
      self.op.target_node,
1699
      ]
1700
    return (nl, nl)
1701

    
1702
  def CheckPrereq(self):
1703
    """Check prerequisites.
1704

1705
    This checks that the instance is in the cluster.
1706

1707
    """
1708
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1709
    assert self.instance is not None, \
1710
      "Cannot retrieve locked instance %s" % self.op.instance_name
1711

    
1712
    if instance.disk_template not in constants.DTS_COPYABLE:
1713
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1714
                                 instance.disk_template, errors.ECODE_STATE)
1715

    
1716
    node = self.cfg.GetNodeInfo(self.op.target_node)
1717
    assert node is not None, \
1718
      "Cannot retrieve locked node %s" % self.op.target_node
1719

    
1720
    self.target_node = target_node = node.name
1721

    
1722
    if target_node == instance.primary_node:
1723
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1724
                                 (instance.name, target_node),
1725
                                 errors.ECODE_STATE)
1726

    
1727
    bep = self.cfg.GetClusterInfo().FillBE(instance)
1728

    
1729
    for idx, dsk in enumerate(instance.disks):
1730
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1731
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1732
                                   " cannot copy" % idx, errors.ECODE_STATE)
1733

    
1734
    CheckNodeOnline(self, target_node)
1735
    CheckNodeNotDrained(self, target_node)
1736
    CheckNodeVmCapable(self, target_node)
1737
    cluster = self.cfg.GetClusterInfo()
1738
    group_info = self.cfg.GetNodeGroup(node.group)
1739
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1740
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1741
                           ignore=self.op.ignore_ipolicy)
1742

    
1743
    if instance.admin_state == constants.ADMINST_UP:
1744
      # check memory requirements on the secondary node
1745
      CheckNodeFreeMemory(self, target_node,
1746
                          "failing over instance %s" %
1747
                          instance.name, bep[constants.BE_MAXMEM],
1748
                          instance.hypervisor)
1749
    else:
1750
      self.LogInfo("Not checking memory on the secondary node as"
1751
                   " instance will not be started")
1752

    
1753
    # check bridge existance
1754
    CheckInstanceBridgesExist(self, instance, node=target_node)
1755

    
1756
  def Exec(self, feedback_fn):
1757
    """Move an instance.
1758

1759
    The move is done by shutting it down on its present node, copying
1760
    the data over (slow) and starting it on the new node.
1761

1762
    """
1763
    instance = self.instance
1764

    
1765
    source_node = instance.primary_node
1766
    target_node = self.target_node
1767

    
1768
    self.LogInfo("Shutting down instance %s on source node %s",
1769
                 instance.name, source_node)
1770

    
1771
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1772
            self.owned_locks(locking.LEVEL_NODE_RES))
1773

    
1774
    result = self.rpc.call_instance_shutdown(source_node, instance,
1775
                                             self.op.shutdown_timeout,
1776
                                             self.op.reason)
1777
    msg = result.fail_msg
1778
    if msg:
1779
      if self.op.ignore_consistency:
1780
        self.LogWarning("Could not shutdown instance %s on node %s."
1781
                        " Proceeding anyway. Please make sure node"
1782
                        " %s is down. Error details: %s",
1783
                        instance.name, source_node, source_node, msg)
1784
      else:
1785
        raise errors.OpExecError("Could not shutdown instance %s on"
1786
                                 " node %s: %s" %
1787
                                 (instance.name, source_node, msg))
1788

    
1789
    # create the target disks
1790
    try:
1791
      CreateDisks(self, instance, target_node=target_node)
1792
    except errors.OpExecError:
1793
      self.LogWarning("Device creation failed")
1794
      self.cfg.ReleaseDRBDMinors(instance.name)
1795
      raise
1796

    
1797
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1798

    
1799
    errs = []
1800
    # activate, get path, copy the data over
1801
    for idx, disk in enumerate(instance.disks):
1802
      self.LogInfo("Copying data for disk %d", idx)
1803
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1804
                                               instance.name, True, idx)
1805
      if result.fail_msg:
1806
        self.LogWarning("Can't assemble newly created disk %d: %s",
1807
                        idx, result.fail_msg)
1808
        errs.append(result.fail_msg)
1809
        break
1810
      dev_path, _ = result.payload
1811
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1812
                                             target_node, dev_path,
1813
                                             cluster_name)
1814
      if result.fail_msg:
1815
        self.LogWarning("Can't copy data over for disk %d: %s",
1816
                        idx, result.fail_msg)
1817
        errs.append(result.fail_msg)
1818
        break
1819

    
1820
    if errs:
1821
      self.LogWarning("Some disks failed to copy, aborting")
1822
      try:
1823
        RemoveDisks(self, instance, target_node=target_node)
1824
      finally:
1825
        self.cfg.ReleaseDRBDMinors(instance.name)
1826
        raise errors.OpExecError("Errors during disk copy: %s" %
1827
                                 (",".join(errs),))
1828

    
1829
    instance.primary_node = target_node
1830
    self.cfg.Update(instance, feedback_fn)
1831

    
1832
    self.LogInfo("Removing the disks on the original node")
1833
    RemoveDisks(self, instance, target_node=source_node)
1834

    
1835
    # Only start the instance if it's marked as up
1836
    if instance.admin_state == constants.ADMINST_UP:
1837
      self.LogInfo("Starting instance %s on node %s",
1838
                   instance.name, target_node)
1839

    
1840
      disks_ok, _ = AssembleInstanceDisks(self, instance,
1841
                                          ignore_secondaries=True)
1842
      if not disks_ok:
1843
        ShutdownInstanceDisks(self, instance)
1844
        raise errors.OpExecError("Can't activate the instance's disks")
1845

    
1846
      result = self.rpc.call_instance_start(target_node,
1847
                                            (instance, None, None), False,
1848
                                            self.op.reason)
1849
      msg = result.fail_msg
1850
      if msg:
1851
        ShutdownInstanceDisks(self, instance)
1852
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1853
                                 (instance.name, target_node, msg))
1854

    
1855

    
1856
class LUInstanceMultiAlloc(NoHooksLU):
1857
  """Allocates multiple instances at the same time.
1858

1859
  """
1860
  REQ_BGL = False
1861

    
1862
  def CheckArguments(self):
1863
    """Check arguments.
1864

1865
    """
1866
    nodes = []
1867
    for inst in self.op.instances:
1868
      if inst.iallocator is not None:
1869
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1870
                                   " instance objects", errors.ECODE_INVAL)
1871
      nodes.append(bool(inst.pnode))
1872
      if inst.disk_template in constants.DTS_INT_MIRROR:
1873
        nodes.append(bool(inst.snode))
1874

    
1875
    has_nodes = compat.any(nodes)
1876
    if compat.all(nodes) ^ has_nodes:
1877
      raise errors.OpPrereqError("There are instance objects providing"
1878
                                 " pnode/snode while others do not",
1879
                                 errors.ECODE_INVAL)
1880

    
1881
    if not has_nodes and self.op.iallocator is None:
1882
      default_iallocator = self.cfg.GetDefaultIAllocator()
1883
      if default_iallocator:
1884
        self.op.iallocator = default_iallocator
1885
      else:
1886
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1887
                                   " given and no cluster-wide default"
1888
                                   " iallocator found; please specify either"
1889
                                   " an iallocator or nodes on the instances"
1890
                                   " or set a cluster-wide default iallocator",
1891
                                   errors.ECODE_INVAL)
1892

    
1893
    _CheckOpportunisticLocking(self.op)
1894

    
1895
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1896
    if dups:
1897
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1898
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1899

    
1900
  def ExpandNames(self):
1901
    """Calculate the locks.
1902

1903
    """
1904
    self.share_locks = ShareAll()
1905
    self.needed_locks = {
1906
      # iallocator will select nodes and even if no iallocator is used,
1907
      # collisions with LUInstanceCreate should be avoided
1908
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1909
      }
1910

    
1911
    if self.op.iallocator:
1912
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1913
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1914

    
1915
      if self.op.opportunistic_locking:
1916
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1917
    else:
1918
      nodeslist = []
1919
      for inst in self.op.instances:
1920
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1921
        nodeslist.append(inst.pnode)
1922
        if inst.snode is not None:
1923
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
1924
          nodeslist.append(inst.snode)
1925

    
1926
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1927
      # Lock resources of instance's primary and secondary nodes (copy to
1928
      # prevent accidential modification)
1929
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1930

    
1931
  def DeclareLocks(self, level):
1932
    if level == locking.LEVEL_NODE_RES and \
1933
      self.opportunistic_locks[locking.LEVEL_NODE]:
1934
      # Even when using opportunistic locking, we require the same set of
1935
      # NODE_RES locks as we got NODE locks
1936
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1937
        self.owned_locks(locking.LEVEL_NODE)
1938

    
1939
  def CheckPrereq(self):
1940
    """Check prerequisite.
1941

1942
    """
1943
    if self.op.iallocator:
1944
      cluster = self.cfg.GetClusterInfo()
1945
      default_vg = self.cfg.GetVGName()
1946
      ec_id = self.proc.GetECId()
1947

    
1948
      if self.op.opportunistic_locking:
1949
        # Only consider nodes for which a lock is held
1950
        node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1951
      else:
1952
        node_whitelist = None
1953

    
1954
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1955
                                           _ComputeNics(op, cluster, None,
1956
                                                        self.cfg, ec_id),
1957
                                           _ComputeFullBeParams(op, cluster),
1958
                                           node_whitelist)
1959
               for op in self.op.instances]
1960

    
1961
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1962
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1963

    
1964
      ial.Run(self.op.iallocator)
1965

    
1966
      if not ial.success:
1967
        raise errors.OpPrereqError("Can't compute nodes using"
1968
                                   " iallocator '%s': %s" %
1969
                                   (self.op.iallocator, ial.info),
1970
                                   errors.ECODE_NORES)
1971

    
1972
      self.ia_result = ial.result
1973

    
1974
    if self.op.dry_run:
1975
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1976
        constants.JOB_IDS_KEY: [],
1977
        })
1978

    
1979
  def _ConstructPartialResult(self):
1980
    """Contructs the partial result.
1981

1982
    """
1983
    if self.op.iallocator:
1984
      (allocatable, failed_insts) = self.ia_result
1985
      allocatable_insts = map(compat.fst, allocatable)
1986
    else:
1987
      allocatable_insts = [op.instance_name for op in self.op.instances]
1988
      failed_insts = []
1989

    
1990
    return {
1991
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
1992
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
1993
      }
1994

    
1995
  def Exec(self, feedback_fn):
1996
    """Executes the opcode.
1997

1998
    """
1999
    jobs = []
2000
    if self.op.iallocator:
2001
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2002
      (allocatable, failed) = self.ia_result
2003

    
2004
      for (name, nodes) in allocatable:
2005
        op = op2inst.pop(name)
2006

    
2007
        if len(nodes) > 1:
2008
          (op.pnode, op.snode) = nodes
2009
        else:
2010
          (op.pnode,) = nodes
2011

    
2012
        jobs.append([op])
2013

    
2014
      missing = set(op2inst.keys()) - set(failed)
2015
      assert not missing, \
2016
        "Iallocator did return incomplete result: %s" % \
2017
        utils.CommaJoin(missing)
2018
    else:
2019
      jobs.extend([op] for op in self.op.instances)
2020

    
2021
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2022

    
2023

    
2024
class _InstNicModPrivate:
2025
  """Data structure for network interface modifications.
2026

2027
  Used by L{LUInstanceSetParams}.
2028

2029
  """
2030
  def __init__(self):
2031
    self.params = None
2032
    self.filled = None
2033

    
2034

    
2035
def _PrepareContainerMods(mods, private_fn):
2036
  """Prepares a list of container modifications by adding a private data field.
2037

2038
  @type mods: list of tuples; (operation, index, parameters)
2039
  @param mods: List of modifications
2040
  @type private_fn: callable or None
2041
  @param private_fn: Callable for constructing a private data field for a
2042
    modification
2043
  @rtype: list
2044

2045
  """
2046
  if private_fn is None:
2047
    fn = lambda: None
2048
  else:
2049
    fn = private_fn
2050

    
2051
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2052

    
2053

    
2054
def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
2055
  """Checks if nodes have enough physical CPUs
2056

2057
  This function checks if all given nodes have the needed number of
2058
  physical CPUs. In case any node has less CPUs or we cannot get the
2059
  information from the node, this function raises an OpPrereqError
2060
  exception.
2061

2062
  @type lu: C{LogicalUnit}
2063
  @param lu: a logical unit from which we get configuration data
2064
  @type nodenames: C{list}
2065
  @param nodenames: the list of node names to check
2066
  @type requested: C{int}
2067
  @param requested: the minimum acceptable number of physical CPUs
2068
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2069
      or we cannot check the node
2070

2071
  """
2072
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2073
  for node in nodenames:
2074
    info = nodeinfo[node]
2075
    info.Raise("Cannot get current information from node %s" % node,
2076
               prereq=True, ecode=errors.ECODE_ENVIRON)
2077
    (_, _, (hv_info, )) = info.payload
2078
    num_cpus = hv_info.get("cpu_total", None)
2079
    if not isinstance(num_cpus, int):
2080
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2081
                                 " on node %s, result was '%s'" %
2082
                                 (node, num_cpus), errors.ECODE_ENVIRON)
2083
    if requested > num_cpus:
2084
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2085
                                 "required" % (node, num_cpus, requested),
2086
                                 errors.ECODE_NORES)
2087

    
2088

    
2089
def GetItemFromContainer(identifier, kind, container):
2090
  """Return the item refered by the identifier.
2091

2092
  @type identifier: string
2093
  @param identifier: Item index or name or UUID
2094
  @type kind: string
2095
  @param kind: One-word item description
2096
  @type container: list
2097
  @param container: Container to get the item from
2098

2099
  """
2100
  # Index
2101
  try:
2102
    idx = int(identifier)
2103
    if idx == -1:
2104
      # Append
2105
      absidx = len(container) - 1
2106
    elif idx < 0:
2107
      raise IndexError("Not accepting negative indices other than -1")
2108
    elif idx > len(container):
2109
      raise IndexError("Got %s index %s, but there are only %s" %
2110
                       (kind, idx, len(container)))
2111
    else:
2112
      absidx = idx
2113
    return (absidx, container[idx])
2114
  except ValueError:
2115
    pass
2116

    
2117
  for idx, item in enumerate(container):
2118
    if item.uuid == identifier or item.name == identifier:
2119
      return (idx, item)
2120

    
2121
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2122
                             (kind, identifier), errors.ECODE_NOENT)
2123

    
2124

    
2125
def _ApplyContainerMods(kind, container, chgdesc, mods,
2126
                        create_fn, modify_fn, remove_fn):
2127
  """Applies descriptions in C{mods} to C{container}.
2128

2129
  @type kind: string
2130
  @param kind: One-word item description
2131
  @type container: list
2132
  @param container: Container to modify
2133
  @type chgdesc: None or list
2134
  @param chgdesc: List of applied changes
2135
  @type mods: list
2136
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2137
  @type create_fn: callable
2138
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2139
    receives absolute item index, parameters and private data object as added
2140
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2141
    as list
2142
  @type modify_fn: callable
2143
  @param modify_fn: Callback for modifying an existing item
2144
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2145
    and private data object as added by L{_PrepareContainerMods}, returns
2146
    changes as list
2147
  @type remove_fn: callable
2148
  @param remove_fn: Callback on removing item; receives absolute item index,
2149
    item and private data object as added by L{_PrepareContainerMods}
2150

2151
  """
2152
  for (op, identifier, params, private) in mods:
2153
    changes = None
2154

    
2155
    if op == constants.DDM_ADD:
2156
      # Calculate where item will be added
2157
      # When adding an item, identifier can only be an index
2158
      try:
2159
        idx = int(identifier)
2160
      except ValueError:
2161
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2162
                                   " identifier for %s" % constants.DDM_ADD,
2163
                                   errors.ECODE_INVAL)
2164
      if idx == -1:
2165
        addidx = len(container)
2166
      else:
2167
        if idx < 0:
2168
          raise IndexError("Not accepting negative indices other than -1")
2169
        elif idx > len(container):
2170
          raise IndexError("Got %s index %s, but there are only %s" %
2171
                           (kind, idx, len(container)))
2172
        addidx = idx
2173

    
2174
      if create_fn is None:
2175
        item = params
2176
      else:
2177
        (item, changes) = create_fn(addidx, params, private)
2178

    
2179
      if idx == -1:
2180
        container.append(item)
2181
      else:
2182
        assert idx >= 0
2183
        assert idx <= len(container)
2184
        # list.insert does so before the specified index
2185
        container.insert(idx, item)
2186
    else:
2187
      # Retrieve existing item
2188
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2189

    
2190
      if op == constants.DDM_REMOVE:
2191
        assert not params
2192

    
2193
        changes = [("%s/%s" % (kind, absidx), "remove")]
2194

    
2195
        if remove_fn is not None:
2196
          msg = remove_fn(absidx, item, private)
2197
          if msg:
2198
            changes.append(("%s/%s" % (kind, absidx), msg))
2199

    
2200
        assert container[absidx] == item
2201
        del container[absidx]
2202
      elif op == constants.DDM_MODIFY:
2203
        if modify_fn is not None:
2204
          changes = modify_fn(absidx, item, params, private)
2205
      else:
2206
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2207

    
2208
    assert _TApplyContModsCbChanges(changes)
2209

    
2210
    if not (chgdesc is None or changes is None):
2211
      chgdesc.extend(changes)
2212

    
2213

    
2214
def _UpdateIvNames(base_index, disks):
2215
  """Updates the C{iv_name} attribute of disks.
2216

2217
  @type disks: list of L{objects.Disk}
2218

2219
  """
2220
  for (idx, disk) in enumerate(disks):
2221
    disk.iv_name = "disk/%s" % (base_index + idx, )
2222

    
2223

    
2224
class LUInstanceSetParams(LogicalUnit):
2225
  """Modifies an instances's parameters.
2226

2227
  """
2228
  HPATH = "instance-modify"
2229
  HTYPE = constants.HTYPE_INSTANCE
2230
  REQ_BGL = False
2231

    
2232
  @staticmethod
2233
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2234
    assert ht.TList(mods)
2235
    assert not mods or len(mods[0]) in (2, 3)
2236

    
2237
    if mods and len(mods[0]) == 2:
2238
      result = []
2239

    
2240
      addremove = 0
2241
      for op, params in mods:
2242
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2243
          result.append((op, -1, params))
2244
          addremove += 1
2245

    
2246
          if addremove > 1:
2247
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2248
                                       " supported at a time" % kind,
2249
                                       errors.ECODE_INVAL)
2250
        else:
2251
          result.append((constants.DDM_MODIFY, op, params))
2252

    
2253
      assert verify_fn(result)
2254
    else:
2255
      result = mods
2256

    
2257
    return result
2258

    
2259
  @staticmethod
2260
  def _CheckMods(kind, mods, key_types, item_fn):
2261
    """Ensures requested disk/NIC modifications are valid.
2262

2263
    """
2264
    for (op, _, params) in mods:
2265
      assert ht.TDict(params)
2266

    
2267
      # If 'key_types' is an empty dict, we assume we have an
2268
      # 'ext' template and thus do not ForceDictType
2269
      if key_types:
2270
        utils.ForceDictType(params, key_types)
2271

    
2272
      if op == constants.DDM_REMOVE:
2273
        if params:
2274
          raise errors.OpPrereqError("No settings should be passed when"
2275
                                     " removing a %s" % kind,
2276
                                     errors.ECODE_INVAL)
2277
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2278
        item_fn(op, params)
2279
      else:
2280
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2281

    
2282
  def _VerifyDiskModification(self, op, params):
2283
    """Verifies a disk modification.
2284

2285
    """
2286
    if op == constants.DDM_ADD:
2287
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2288
      if mode not in constants.DISK_ACCESS_SET:
2289
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2290
                                   errors.ECODE_INVAL)
2291

    
2292
      size = params.get(constants.IDISK_SIZE, None)
2293
      if size is None:
2294
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2295
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2296

    
2297
      try:
2298
        size = int(size)
2299
      except (TypeError, ValueError), err:
2300
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2301
                                   errors.ECODE_INVAL)
2302

    
2303
      params[constants.IDISK_SIZE] = size
2304
      name = params.get(constants.IDISK_NAME, None)
2305
      if name is not None and name.lower() == constants.VALUE_NONE:
2306
        params[constants.IDISK_NAME] = None
2307

    
2308
    elif op == constants.DDM_MODIFY:
2309
      if constants.IDISK_SIZE in params:
2310
        raise errors.OpPrereqError("Disk size change not possible, use"
2311
                                   " grow-disk", errors.ECODE_INVAL)
2312

    
2313
      # Disk modification supports changing only the disk name and mode.
2314
      # Changing arbitrary parameters is allowed only for ext disk template",
2315
      if self.instance.disk_template != constants.DT_EXT:
2316
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2317

    
2318
      name = params.get(constants.IDISK_NAME, None)
2319
      if name is not None and name.lower() == constants.VALUE_NONE:
2320
        params[constants.IDISK_NAME] = None
2321

    
2322
  @staticmethod
2323
  def _VerifyNicModification(op, params):
2324
    """Verifies a network interface modification.
2325

2326
    """
2327
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2328
      ip = params.get(constants.INIC_IP, None)
2329
      name = params.get(constants.INIC_NAME, None)
2330
      req_net = params.get(constants.INIC_NETWORK, None)
2331
      link = params.get(constants.NIC_LINK, None)
2332
      mode = params.get(constants.NIC_MODE, None)
2333
      if name is not None and name.lower() == constants.VALUE_NONE:
2334
        params[constants.INIC_NAME] = None
2335
      if req_net is not None:
2336
        if req_net.lower() == constants.VALUE_NONE:
2337
          params[constants.INIC_NETWORK] = None
2338
          req_net = None
2339
        elif link is not None or mode is not None:
2340
          raise errors.OpPrereqError("If network is given"
2341
                                     " mode or link should not",
2342
                                     errors.ECODE_INVAL)
2343

    
2344
      if op == constants.DDM_ADD:
2345
        macaddr = params.get(constants.INIC_MAC, None)
2346
        if macaddr is None:
2347
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2348

    
2349
      if ip is not None:
2350
        if ip.lower() == constants.VALUE_NONE:
2351
          params[constants.INIC_IP] = None
2352
        else:
2353
          if ip.lower() == constants.NIC_IP_POOL:
2354
            if op == constants.DDM_ADD and req_net is None:
2355
              raise errors.OpPrereqError("If ip=pool, parameter network"
2356
                                         " cannot be none",
2357
                                         errors.ECODE_INVAL)
2358
          else:
2359
            if not netutils.IPAddress.IsValid(ip):
2360
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2361
                                         errors.ECODE_INVAL)
2362

    
2363
      if constants.INIC_MAC in params:
2364
        macaddr = params[constants.INIC_MAC]
2365
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2366
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2367

    
2368
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2369
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2370
                                     " modifying an existing NIC",
2371
                                     errors.ECODE_INVAL)
2372

    
2373
  def CheckArguments(self):
2374
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2375
            self.op.hvparams or self.op.beparams or self.op.os_name or
2376
            self.op.osparams or self.op.offline is not None or
2377
            self.op.runtime_mem or self.op.pnode):
2378
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2379

    
2380
    if self.op.hvparams:
2381
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2382
                           "hypervisor", "instance", "cluster")
2383

    
2384
    self.op.disks = self._UpgradeDiskNicMods(
2385
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2386
    self.op.nics = self._UpgradeDiskNicMods(
2387
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2388

    
2389
    if self.op.disks and self.op.disk_template is not None:
2390
      raise errors.OpPrereqError("Disk template conversion and other disk"
2391
                                 " changes not supported at the same time",
2392
                                 errors.ECODE_INVAL)
2393

    
2394
    if (self.op.disk_template and
2395
        self.op.disk_template in constants.DTS_INT_MIRROR and
2396
        self.op.remote_node is None):
2397
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2398
                                 " one requires specifying a secondary node",
2399
                                 errors.ECODE_INVAL)
2400

    
2401
    # Check NIC modifications
2402
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2403
                    self._VerifyNicModification)
2404

    
2405
    if self.op.pnode:
2406
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2407

    
2408
  def ExpandNames(self):
2409
    self._ExpandAndLockInstance()
2410
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2411
    # Can't even acquire node locks in shared mode as upcoming changes in
2412
    # Ganeti 2.6 will start to modify the node object on disk conversion
2413
    self.needed_locks[locking.LEVEL_NODE] = []
2414
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2415
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2416
    # Look node group to look up the ipolicy
2417
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2418

    
2419
  def DeclareLocks(self, level):
2420
    if level == locking.LEVEL_NODEGROUP:
2421
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2422
      # Acquire locks for the instance's nodegroups optimistically. Needs
2423
      # to be verified in CheckPrereq
2424
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2425
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2426
    elif level == locking.LEVEL_NODE:
2427
      self._LockInstancesNodes()
2428
      if self.op.disk_template and self.op.remote_node:
2429
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2430
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2431
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2432
      # Copy node locks
2433
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2434
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2435

    
2436
  def BuildHooksEnv(self):
2437
    """Build hooks env.
2438

2439
    This runs on the master, primary and secondaries.
2440

2441
    """
2442
    args = {}
2443
    if constants.BE_MINMEM in self.be_new:
2444
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2445
    if constants.BE_MAXMEM in self.be_new:
2446
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2447
    if constants.BE_VCPUS in self.be_new:
2448
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2449
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2450
    # information at all.
2451

    
2452
    if self._new_nics is not None:
2453
      nics = []
2454

    
2455
      for nic in self._new_nics:
2456
        n = copy.deepcopy(nic)
2457
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2458
        n.nicparams = nicparams
2459
        nics.append(NICToTuple(self, n))
2460

    
2461
      args["nics"] = nics
2462

    
2463
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2464
    if self.op.disk_template:
2465
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2466
    if self.op.runtime_mem:
2467
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2468

    
2469
    return env
2470

    
2471
  def BuildHooksNodes(self):
2472
    """Build hooks nodes.
2473

2474
    """
2475
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2476
    return (nl, nl)
2477

    
2478
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2479
                              old_params, cluster, pnode):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP if a new network was given
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    instance = self.instance
    pnode = instance.primary_node
    cluster = self.cluster
    if instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 instance.disk_template, errors.ECODE_INVAL)

    if (instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node == pnode:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node)
      CheckNodeNotDrained(self, self.op.remote_node)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode = instance.primary_node

    self.warn = []

    if (self.op.pnode is not None and self.op.pnode != pnode and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
                                   errors.ECODE_STATE)

    assert pnode in self.owned_locks(locking.LEVEL_NODE)
    nodelist = list(instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode)
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    if instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {},
                      self._VerifyDiskModification)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      self._VerifyDiskModification)

    # Prepare disk/NIC modifications
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # Check the validity of the `provider' parameter
    if instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
                                              instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
    be_old = cluster.FillBE(instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
                                max_requested_cpu + 1, instance.hypervisor)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         [instance.hypervisor], False)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" % pnode)
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" % node,
                     prereq=True, ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" % node,
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" % node,
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node)
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have between %d and %d"
                                   " MB of memory unless --force is given" %
                                   (instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(self, instance.primary_node,
                            "ballooning memory for instance %s" %
                            instance.name, delta, instance.hypervisor)

    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    assert instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     instance.name, pnode, [snode],
                                     disk_info, None, None, 0, feedback_fn,
                                     self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
    info = GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode, instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
          f_create = node == pnode
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
                               excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance

    assert len(instance.secondary_nodes) == 1
    assert instance.disk_template == constants.DT_DRBD8

    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
    new_disks = [d.children[0] for d in instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, instance.disks)
    self.cfg.Update(instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
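    """Calls the hotplug RPC for one device on the instance's primary node.

    Logs the outcome and returns a "hotplug:done" / "hotplug:failed" string
    that the callers append to their change descriptions.

    """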
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    instance = self.instance

    # add a new disk
    if instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
                           instance.primary_node, instance.secondary_nodes,
                           [params], file_path, file_driver, idx,
                           self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
      "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      self.cfg.SetDiskID(disk, self.instance.primary_node)
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _ModifyDisk(self, idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    if constants.IDISK_MODE in params:
      disk.mode = params.get(constants.IDISK_MODE)
      changes.append(("disk.mode/%d" % idx, disk.mode))

    if constants.IDISK_NAME in params:
      disk.name = params.get(constants.IDISK_NAME)
      changes.append(("disk.name/%d" % idx, disk.name))

    # Modify arbitrary params in case instance template is ext
    for key, value in params.iteritems():
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
          self.instance.disk_template == constants.DT_EXT):
        # stolen from GetUpdatedParams: default means reset/delete
        if value.lower() == constants.VALUE_DEFAULT:
          try:
            del disk.params[key]
          except KeyError:
            pass
        else:
          disk.params[key] = value
        changes.append(("disk.params:%s/%d" % (key, idx), value))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
      self.cfg.SetDiskID(disk, node)
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx, node, msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

  def _RemoveNic(self, idx, nic, _):
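    """Hot-unplugs a NIC if hotplugging was requested, otherwise does nothing.

    """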
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []
    instance = self.instance

    # New primary node
    if self.op.pnode:
      instance.primary_node = self.op.pnode

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
                                                     instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(instance.all_nodes)
        if self.op.remote_node:
          check_nodes.add(self.op.remote_node)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

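  # Maps (current template, requested template) pairs to the conversion
  # methods above; _PreCheckDiskTemplate rejects any pair not listed here.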
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
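  """Change the node group of an instance.

  """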
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
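    """Declares the needed locks and resolves the requested target groups.

    """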
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
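    """Acquires node group and node locks depending on the locking level.

    """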
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_name
                        for group in lock_groups
                        for node_name in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
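    """Checks the owned locks and computes the final set of target groups.

    """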
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
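    """Runs the iallocator and returns the resulting jobs.

    """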
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)