#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
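
# Editor's note (illustrative sketch, not part of the original module): the
# prefix check above accepts a short name that resolves to its own FQDN and
# rejects a name that resolves to something unrelated. Assuming a resolver
# where "web1" maps to "web1.example.com":
#
#   _CheckHostnameSane(lu, "web1")
#     -> logs "Resolved given name 'web1' to 'web1.example.com'" and returns
#        the resolved hostname object
#
# whereas, if "web1" resolved to "mail.example.com", MatchNameComponent would
# not match and errors.OpPrereqError (ECODE_INVAL) would be raised.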


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)
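
# Editor's note (illustrative, not part of the original module): this ties the
# two opcode flags together. For an opcode-like object "op":
#
#   op.opportunistic_locking = True, op.iallocator = None
#     -> raises errors.OpPrereqError (ECODE_INVAL)
#   op.opportunistic_locking = True, op.iallocator = "hail" (or any allocator)
#     -> passes, since node selection is delegated to the allocator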


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
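
# Editor's sketch (illustrative, not part of the original module): how
# VALUE_AUTO entries are resolved before filling. Assuming the cluster default
# beparams contain {BE_VCPUS: 1, BE_MAXMEM: 128, BE_MINMEM: 128}:
#
#   op.beparams = {constants.BE_VCPUS: constants.VALUE_AUTO,
#                  constants.BE_MAXMEM: 512}
#   _ComputeFullBeParams(op, cluster)
#     -> the "auto" vcpus value is replaced by the cluster default (1),
#        objects.UpgradeBeParams takes care of the legacy "memory" key, and
#        cluster.SimpleFillBE merges in the remaining defaults, yielding a
#        complete beparams dict (maxmem=512, minmem=128, vcpus=1, ...).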


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built-up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
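
# Editor's sketch (illustrative, not part of the original module): a typical
# opcode NIC entry and what the loop above builds from it. Assuming a network
# named "mynet" exists and name checks were requested:
#
#   op.nics = [{constants.INIC_MAC: constants.VALUE_AUTO,
#               constants.INIC_IP: constants.NIC_IP_POOL,
#               constants.INIC_NETWORK: "mynet"}]
#
#   -> one objects.NIC is returned, with mac still "auto" (a real MAC is
#      generated later in CheckPrereq), ip left as "pool" (resolved from the
#      network's address pool once the primary node is known), network set to
#      the UUID returned by cfg.LookupNetwork("mynet"), and nicparams holding
#      only the explicitly requested parameters; SimpleFillNIC is used here
#      only to syntax-check them against the cluster defaults.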


def _CheckForConflictingIp(lu, ip, node):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
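
# Editor's sketch (illustrative, not part of the original module): the
# instance_spec dict is keyed by the ISPEC_* constants, mirroring the ispec
# built in LUInstanceCreate.CheckPrereq below, e.g.:
#
#   instance_spec = {constants.ISPEC_MEM_SIZE: 512,
#                    constants.ISPEC_CPU_COUNT: 2,
#                    constants.ISPEC_DISK_COUNT: 1,
#                    constants.ISPEC_DISK_SIZE: [10240],
#                    constants.ISPEC_NIC_COUNT: 1,
#                    constants.ISPEC_SPINDLE_USE: 1}
#
# The result is passed through from ComputeIPolicySpecViolation, which yields
# a list of violation descriptions (empty when the spec fits the policy).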


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
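
# Editor's note (illustrative, not part of the original module): OS variants
# are encoded in the OS name after a '+' sign, e.g. "debootstrap+default".
# Assuming os_obj.supported_variants == ["default", "minimal"]:
#
#   _CheckOSVariant(os_obj, "debootstrap+default")  -> passes
#   _CheckOSVariant(os_obj, "debootstrap")          -> "OS name must include
#                                                       a variant"
#   _CheckOSVariant(os_obj, "debootstrap+huge")     -> "Unsupported OS variant"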


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disk parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_DEFAULT

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice but to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      self.op.snode = ial.result[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if (int(ei_version) != constants.EXPORT_VERSION):
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
          disk = {
            constants.IDISK_SIZE: disk_sz,
            constants.IDISK_NAME: disk_name
            }
          disks.append(disk)
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in [constants.INIC_IP,
                       constants.INIC_MAC, constants.INIC_NAME]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          network = einfo.get(constants.INISECT_INS,
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
          # in case network is given link and mode are inherited
          # from nodegroup's netparams and thus should not be passed here
          if network:
            ndict[constants.INIC_NETWORK] = network
          else:
            for name in list(constants.NICS_PARAMETERS):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      nodes = [pnode]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(snode)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        raise errors.OpPrereqError("Disk template %s not supported with"
                                   " exclusive storage" % self.op.disk_template,
                                   errors.ECODE_STATE)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      else:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
          # to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor)

    self.dry_run_result = list(nodenames)
1225

    
1226
  def Exec(self, feedback_fn):
1227
    """Create and add the instance to the cluster.
1228

1229
    """
1230
    instance = self.op.instance_name
1231
    pnode_name = self.pnode.name
1232

    
1233
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1234
                self.owned_locks(locking.LEVEL_NODE)), \
1235
      "Node locks differ from node resource locks"
1236
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1237

    
1238
    ht_kind = self.op.hypervisor
1239
    if ht_kind in constants.HTS_REQ_PORT:
1240
      network_port = self.cfg.AllocatePort()
1241
    else:
1242
      network_port = None

    # This is ugly, but we have a chicken-and-egg problem here:
    # we can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False
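    # At this point disk_abort is True only if wiping the disks failed above
    # or the requested/implicit sync wait reported degraded disks.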

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, pnode_name)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             instance, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             instance, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (instance, pnode_name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node, pnode_name,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.name
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)
    # update info on disks
    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 instance.disk_template, errors.ECODE_STATE)

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node)
    CheckNodeNotDrained(self, target_node)
    CheckNodeVmCapable(self, target_node)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(self, target_node,
                          "failing over instance %s" %
                          instance.name, bep[constants.BE_MAXMEM],
                          instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the target node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.LogWarning("Could not shutdown instance %s on node %s."
                        " Proceeding anyway. Please make sure node"
                        " %s is down. Error details: %s",
                        instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path, _ = result.payload
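      # The export RPC below streams the contents of the source device into
      # dev_path on the target node (the exact transport depends on the
      # backend); any failure aborts the copy loop.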
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = AssembleInstanceDisks(self, instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node,
                                            (instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

1892
    if compat.all(nodes) ^ has_nodes:
1893
      raise errors.OpPrereqError("There are instance objects providing"
1894
                                 " pnode/snode while others do not",
1895
                                 errors.ECODE_INVAL)
1896

    
1897
    if not has_nodes and self.op.iallocator is None:
1898
      default_iallocator = self.cfg.GetDefaultIAllocator()
1899
      if default_iallocator:
1900
        self.op.iallocator = default_iallocator
1901
      else:
1902
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1903
                                   " given and no cluster-wide default"
1904
                                   " iallocator found; please specify either"
1905
                                   " an iallocator or nodes on the instances"
1906
                                   " or set a cluster-wide default iallocator",
1907
                                   errors.ECODE_INVAL)
1908

    
1909
    _CheckOpportunisticLocking(self.op)
1910

    
1911
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1912
    if dups:
1913
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1914
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1915

    
1916
  def ExpandNames(self):
1917
    """Calculate the locks.
1918

1919
    """
1920
    self.share_locks = ShareAll()
1921
    self.needed_locks = {
1922
      # iallocator will select nodes and even if no iallocator is used,
1923
      # collisions with LUInstanceCreate should be avoided
1924
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1925
      }
1926

    
1927
    if self.op.iallocator:
1928
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1929
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1930

    
1931
      if self.op.opportunistic_locking:
1932
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1933
    else:
1934
      nodeslist = []
1935
      for inst in self.op.instances:
1936
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1937
        nodeslist.append(inst.pnode)
1938
        if inst.snode is not None:
1939
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
1940
          nodeslist.append(inst.snode)
1941

    
1942
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1943
      # Lock resources of instance's primary and secondary nodes (copy to
1944
      # prevent accidential modification)
1945
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1946

    
1947
  def DeclareLocks(self, level):
1948
    if level == locking.LEVEL_NODE_RES and \
1949
      self.opportunistic_locks[locking.LEVEL_NODE]:
1950
      # Even when using opportunistic locking, we require the same set of
1951
      # NODE_RES locks as we got NODE locks
1952
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1953
        self.owned_locks(locking.LEVEL_NODE)
1954

    
1955
  def CheckPrereq(self):
1956
    """Check prerequisite.
1957

1958
    """
1959
    if self.op.iallocator:
1960
      cluster = self.cfg.GetClusterInfo()
1961
      default_vg = self.cfg.GetVGName()
1962
      ec_id = self.proc.GetECId()
1963

    
1964
      if self.op.opportunistic_locking:
1965
        # Only consider nodes for which a lock is held
1966
        node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1967
      else:
1968
        node_whitelist = None
1969

    
1970
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1971
                                           _ComputeNics(op, cluster, None,
1972
                                                        self.cfg, ec_id),
1973
                                           _ComputeFullBeParams(op, cluster),
1974
                                           node_whitelist)
1975
               for op in self.op.instances]
1976

    
1977
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1978
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1979

    
1980
      ial.Run(self.op.iallocator)
1981

    
1982
      if not ial.success:
1983
        raise errors.OpPrereqError("Can't compute nodes using"
1984
                                   " iallocator '%s': %s" %
1985
                                   (self.op.iallocator, ial.info),
1986
                                   errors.ECODE_NORES)
1987

    
1988
      self.ia_result = ial.result
1989

    
1990
    if self.op.dry_run:
1991
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1992
        constants.JOB_IDS_KEY: [],
1993
        })
1994

    
1995
  def _ConstructPartialResult(self):
1996
    """Contructs the partial result.
1997

1998
    """
1999
    if self.op.iallocator:
2000
      (allocatable, failed_insts) = self.ia_result
2001
      allocatable_insts = map(compat.fst, allocatable)
2002
    else:
2003
      allocatable_insts = [op.instance_name for op in self.op.instances]
2004
      failed_insts = []
2005

    
2006
    return {
2007
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
2008
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
2009
      }
2010

    
2011
  def Exec(self, feedback_fn):
2012
    """Executes the opcode.
2013

2014
    """
2015
    jobs = []
2016
    if self.op.iallocator:
2017
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2018
      (allocatable, failed) = self.ia_result
2019

    
2020
      for (name, nodes) in allocatable:
2021
        op = op2inst.pop(name)
2022

    
2023
        if len(nodes) > 1:
2024
          (op.pnode, op.snode) = nodes
2025
        else:
2026
          (op.pnode,) = nodes
2027

    
2028
        jobs.append([op])
2029

    
2030
      missing = set(op2inst.keys()) - set(failed)
2031
      assert not missing, \
2032
        "Iallocator did return incomplete result: %s" % \
2033
        utils.CommaJoin(missing)
2034
    else:
2035
      jobs.extend([op] for op in self.op.instances)
2036

    
2037
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2038

    
2039

    
2040
class _InstNicModPrivate:
2041
  """Data structure for network interface modifications.
2042

2043
  Used by L{LUInstanceSetParams}.
2044

2045
  """
2046
  def __init__(self):
2047
    self.params = None
2048
    self.filled = None
2049

    
2050

    
2051
def _PrepareContainerMods(mods, private_fn):
2052
  """Prepares a list of container modifications by adding a private data field.
2053

2054
  @type mods: list of tuples; (operation, index, parameters)
2055
  @param mods: List of modifications
2056
  @type private_fn: callable or None
2057
  @param private_fn: Callable for constructing a private data field for a
2058
    modification
2059
  @rtype: list
2060

2061
  """
2062
  if private_fn is None:
2063
    fn = lambda: None
2064
  else:
2065
    fn = private_fn
2066

    
2067
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2068

    
2069

    
2070
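# Typical invocation (sketch only, caller-dependent values):
#   _CheckNodesPhysicalCPUs(self, [pnode.name], cpus, self.op.hypervisor)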
def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node, num_cpus, requested),
                                 errors.ECODE_NORES)


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
2117
  try:
2118
    idx = int(identifier)
2119
    if idx == -1:
2120
      # Append
2121
      absidx = len(container) - 1
2122
    elif idx < 0:
2123
      raise IndexError("Not accepting negative indices other than -1")
2124
    elif idx > len(container):
2125
      raise IndexError("Got %s index %s, but there are only %s" %
2126
                       (kind, idx, len(container)))
2127
    else:
2128
      absidx = idx
2129
    return (absidx, container[idx])
2130
  except ValueError:
2131
    pass
2132

    
2133
  for idx, item in enumerate(container):
2134
    if item.uuid == identifier or item.name == identifier:
2135
      return (idx, item)
2136

    
2137
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2138
                             (kind, identifier), errors.ECODE_NOENT)
2139

    
2140

    
2141
def _ApplyContainerMods(kind, container, chgdesc, mods,
2142
                        create_fn, modify_fn, remove_fn):
2143
  """Applies descriptions in C{mods} to C{container}.
2144

2145
  @type kind: string
2146
  @param kind: One-word item description
2147
  @type container: list
2148
  @param container: Container to modify
2149
  @type chgdesc: None or list
2150
  @param chgdesc: List of applied changes
2151
  @type mods: list
2152
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2153
  @type create_fn: callable
2154
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2155
    receives absolute item index, parameters and private data object as added
2156
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2157
    as list
2158
  @type modify_fn: callable
2159
  @param modify_fn: Callback for modifying an existing item
2160
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2161
    and private data object as added by L{_PrepareContainerMods}, returns
2162
    changes as list
2163
  @type remove_fn: callable
2164
  @param remove_fn: Callback on removing item; receives absolute item index,
2165
    item and private data object as added by L{_PrepareContainerMods}
2166

2167
  """
2168
  for (op, identifier, params, private) in mods:
2169
    changes = None
2170

    
2171
    if op == constants.DDM_ADD:
2172
      # Calculate where item will be added
2173
      # When adding an item, identifier can only be an index
2174
      try:
2175
        idx = int(identifier)
2176
      except ValueError:
2177
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2178
                                   " identifier for %s" % constants.DDM_ADD,
2179
                                   errors.ECODE_INVAL)
2180
      if idx == -1:
2181
        addidx = len(container)
2182
      else:
2183
        if idx < 0:
2184
          raise IndexError("Not accepting negative indices other than -1")
2185
        elif idx > len(container):
2186
          raise IndexError("Got %s index %s, but there are only %s" %
2187
                           (kind, idx, len(container)))
2188
        addidx = idx
2189

    
2190
      if create_fn is None:
2191
        item = params
2192
      else:
2193
        (item, changes) = create_fn(addidx, params, private)
2194

    
2195
      if idx == -1:
2196
        container.append(item)
2197
      else:
2198
        assert idx >= 0
2199
        assert idx <= len(container)
2200
        # list.insert does so before the specified index
2201
        container.insert(idx, item)
2202
    else:
2203
      # Retrieve existing item
2204
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2205

    
2206
      if op == constants.DDM_REMOVE:
2207
        assert not params
2208

    
2209
        changes = [("%s/%s" % (kind, absidx), "remove")]
2210

    
2211
        if remove_fn is not None:
2212
          msg = remove_fn(absidx, item, private)
2213
          if msg:
2214
            changes.append(("%s/%s" % (kind, absidx), msg))
2215

    
2216
        assert container[absidx] == item
2217
        del container[absidx]
2218
      elif op == constants.DDM_MODIFY:
2219
        if modify_fn is not None:
2220
          changes = modify_fn(absidx, item, params, private)
2221
      else:
2222
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2223

    
2224
    assert _TApplyContModsCbChanges(changes)
2225

    
2226
    if not (chgdesc is None or changes is None):
2227
      chgdesc.extend(changes)
2228

    
2229

    
2230
def _UpdateIvNames(base_index, disks):
2231
  """Updates the C{iv_name} attribute of disks.
2232

2233
  @type disks: list of L{objects.Disk}
2234

2235
  """
2236
  for (idx, disk) in enumerate(disks):
2237
    disk.iv_name = "disk/%s" % (base_index + idx, )
2238

    
2239

    
2240
class LUInstanceSetParams(LogicalUnit):
2241
  """Modifies an instances's parameters.
2242

2243
  """
2244
  HPATH = "instance-modify"
2245
  HTYPE = constants.HTYPE_INSTANCE
2246
  REQ_BGL = False
2247

    
2248
  @staticmethod
2249
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
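    # Upgrades legacy two-element modifications, e.g. ("add", {...}) or
    # (0, {"mode": "ro"}), to the (op, identifier, params) triples used
    # internally; the values shown here are illustrative only.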
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  def _VerifyDiskModification(self, op, params):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)

      try:
        size = int(size)
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
                                   errors.ECODE_INVAL)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)

      # Disk modification supports changing only the disk name and mode.
      # Changing arbitrary parameters is allowed only for the ext disk
      # template.
      if self.instance.disk_template != constants.DT_EXT:
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)

      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given, mode or link"
                                     " should not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node group (shared) only to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

        mac_prefix = None
2570
        if net_uuid:
2571
          nobj = self.cfg.GetNetwork(net_uuid)
2572
          mac_prefix = nobj.mac_prefix
2573

    
2574
        return mac_prefix
2575

    
2576
      new_prefix = get_net_prefix(new_net_uuid)
2577
      old_prefix = get_net_prefix(old_net_uuid)
2578
      if old_prefix != new_prefix:
2579
        params[constants.INIC_MAC] = \
2580
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2581

    
2582
    # if there is a change in (ip, network) tuple
2583
    new_ip = params.get(constants.INIC_IP, old_ip)
2584
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2585
      if new_ip:
2586
        # if IP is pool then require a network and generate one IP
2587
        if new_ip.lower() == constants.NIC_IP_POOL:
2588
          if new_net_uuid:
2589
            try:
2590
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2591
            except errors.ReservationError:
2592
              raise errors.OpPrereqError("Unable to get a free IP"
2593
                                         " from the address pool",
2594
                                         errors.ECODE_STATE)
2595
            self.LogInfo("Chose IP %s from network %s",
2596
                         new_ip,
2597
                         new_net_obj.name)
2598
            params[constants.INIC_IP] = new_ip
2599
          else:
2600
            raise errors.OpPrereqError("ip=pool, but no network found",
2601
                                       errors.ECODE_INVAL)
2602
        # Reserve new IP if in the new network if any
2603
        elif new_net_uuid:
2604
          try:
2605
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2606
            self.LogInfo("Reserving IP %s in network %s",
2607
                         new_ip, new_net_obj.name)
2608
          except errors.ReservationError:
2609
            raise errors.OpPrereqError("IP %s not available in network %s" %
2610
                                       (new_ip, new_net_obj.name),
2611
                                       errors.ECODE_NOTUNIQUE)
2612
        # new network is None so check if new IP is a conflicting IP
2613
        elif self.op.conflicts_check:
2614
          _CheckForConflictingIp(self, new_ip, pnode)
2615

    
2616
      # release old IP if old network is not None
2617
      if old_ip and old_net_uuid:
2618
        try:
2619
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2620
        except errors.AddressPoolError:
2621
          logging.warning("Release IP %s not contained in network %s",
2622
                          old_ip, old_net_obj.name)
2623

    
2624
    # there are no changes in (ip, network) tuple and old network is not None
2625
    elif (old_net_uuid is not None and
2626
          (req_link is not None or req_mode is not None)):
2627
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2628
                                 " a NIC that is connected to a network",
2629
                                 errors.ECODE_INVAL)
2630

    
2631
    private.params = new_params
2632
    private.filled = new_filled_params
2633

    
2634
  def _PreCheckDiskTemplate(self, pnode_info):
2635
    """CheckPrereq checks related to a new disk template."""
2636
    # Arguments are passed to avoid configuration lookups
2637
    instance = self.instance
2638
    pnode = instance.primary_node
2639
    cluster = self.cluster
2640
    if instance.disk_template == self.op.disk_template:
2641
      raise errors.OpPrereqError("Instance already has disk template %s" %
2642
                                 instance.disk_template, errors.ECODE_INVAL)
2643

    
2644
    if (instance.disk_template,
2645
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2646
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2647
                                 " %s to %s" % (instance.disk_template,
2648
                                                self.op.disk_template),
2649
                                 errors.ECODE_INVAL)
2650
    CheckInstanceState(self, instance, INSTANCE_DOWN,
2651
                       msg="cannot change disk template")
2652
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2653
      if self.op.remote_node == pnode:
2654
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2655
                                   " as the primary node of the instance" %
2656
                                   self.op.remote_node, errors.ECODE_STATE)
2657
      CheckNodeOnline(self, self.op.remote_node)
2658
      CheckNodeNotDrained(self, self.op.remote_node)
2659
      # FIXME: here we assume that the old instance type is DT_PLAIN
2660
      assert instance.disk_template == constants.DT_PLAIN
2661
      disks = [{constants.IDISK_SIZE: d.size,
2662
                constants.IDISK_VG: d.logical_id[0]}
2663
               for d in instance.disks]
2664
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2665
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2666

    
2667
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2668
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2669
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2670
                                                              snode_group)
2671
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2672
                             ignore=self.op.ignore_ipolicy)
2673
      if pnode_info.group != snode_info.group:
2674
        self.LogWarning("The primary and secondary nodes are in two"
2675
                        " different node groups; the disk parameters"
2676
                        " from the first disk's node group will be"
2677
                        " used")
2678

    
2679
    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
2680
      # Make sure none of the nodes require exclusive storage
2681
      nodes = [pnode_info]
2682
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2683
        assert snode_info
2684
        nodes.append(snode_info)
2685
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2686
      if compat.any(map(has_es, nodes)):
2687
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2688
                  " storage is enabled" % (instance.disk_template,
2689
                                           self.op.disk_template))
2690
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2691

    
2692
  def CheckPrereq(self):
2693
    """Check prerequisites.
2694

2695
    This only checks the instance list against the existing names.
2696

2697
    """
2698
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2699
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2700

    
2701
    cluster = self.cluster = self.cfg.GetClusterInfo()
2702
    assert self.instance is not None, \
2703
      "Cannot retrieve locked instance %s" % self.op.instance_name
2704

    
2705
    pnode = instance.primary_node
2706

    
2707
    self.warn = []
2708

    
2709
    if (self.op.pnode is not None and self.op.pnode != pnode and
2710
        not self.op.force):
2711
      # verify that the instance is not up
2712
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
2713
                                                  instance.hypervisor)
2714
      if instance_info.fail_msg:
2715
        self.warn.append("Can't get instance runtime information: %s" %
2716
                         instance_info.fail_msg)
2717
      elif instance_info.payload:
2718
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2719
                                   errors.ECODE_STATE)
2720

    
2721
    assert pnode in self.owned_locks(locking.LEVEL_NODE)
2722
    nodelist = list(instance.all_nodes)
2723
    pnode_info = self.cfg.GetNodeInfo(pnode)
2724
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2725

    
2726
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2727
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2728
    group_info = self.cfg.GetNodeGroup(pnode_info.group)
2729

    
2730
    # dictionary with instance information after the modification
2731
    ispec = {}
2732

    
2733
    # Check disk modifications. This is done here and not in CheckArguments
2734
    # (as with NICs), because we need to know the instance's disk template
2735
    if instance.disk_template == constants.DT_EXT:
2736
      self._CheckMods("disk", self.op.disks, {},
2737
                      self._VerifyDiskModification)
2738
    else:
2739
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2740
                      self._VerifyDiskModification)
2741

    
2742
    # Prepare disk/NIC modifications
2743
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
2744
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2745

    
2746
    # Check the validity of the `provider' parameter
2747
    if instance.disk_template in constants.DT_EXT:
2748
      for mod in self.diskmod:
2749
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2750
        if mod[0] == constants.DDM_ADD:
2751
          if ext_provider is None:
2752
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
2753
                                       " '%s' missing, during disk add" %
2754
                                       (constants.DT_EXT,
2755
                                        constants.IDISK_PROVIDER),
2756
                                       errors.ECODE_NOENT)
2757
        elif mod[0] == constants.DDM_MODIFY:
2758
          if ext_provider:
2759
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2760
                                       " modification" %
2761
                                       constants.IDISK_PROVIDER,
2762
                                       errors.ECODE_INVAL)
2763
    else:
2764
      for mod in self.diskmod:
2765
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2766
        if ext_provider is not None:
2767
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
2768
                                     " instances of type '%s'" %
2769
                                     (constants.IDISK_PROVIDER,
2770
                                      constants.DT_EXT),
2771
                                     errors.ECODE_INVAL)
2772

    
2773
    if self.op.hotplug:
2774
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
2775
                                               self.instance)
2776
      result.Raise("Hotplug is not supported.")
2777

    
2778
    # OS change
2779
    if self.op.os_name and not self.op.force:
2780
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2781
                     self.op.force_variant)
2782
      instance_os = self.op.os_name
2783
    else:
2784
      instance_os = instance.os
2785

    
2786
    assert not (self.op.disk_template and self.op.disks), \
2787
      "Can't modify disk template and apply disk changes at the same time"
2788

    
2789
    if self.op.disk_template:
2790
      self._PreCheckDiskTemplate(pnode_info)
2791

    
2792
    # hvparams processing
2793
    if self.op.hvparams:
2794
      hv_type = instance.hypervisor
2795
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2796
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2797
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2798

    
2799
      # local check
2800
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2801
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2802
      self.hv_proposed = self.hv_new = hv_new # the new actual values
2803
      self.hv_inst = i_hvdict # the new dict (without defaults)
2804
    else:
2805
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2806
                                              instance.hvparams)
2807
      self.hv_new = self.hv_inst = {}
2808

    
2809
    # beparams processing
2810
    if self.op.beparams:
2811
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2812
                                  use_none=True)
2813
      objects.UpgradeBeParams(i_bedict)
2814
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2815
      be_new = cluster.SimpleFillBE(i_bedict)
2816
      self.be_proposed = self.be_new = be_new # the new actual values
2817
      self.be_inst = i_bedict # the new dict (without defaults)
2818
    else:
2819
      self.be_new = self.be_inst = {}
2820
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2821
    be_old = cluster.FillBE(instance)
2822

    
2823
    # CPU param validation -- checking every time a parameter is
2824
    # changed to cover all cases where either CPU mask or vcpus have
2825
    # changed
2826
    if (constants.BE_VCPUS in self.be_proposed and
2827
        constants.HV_CPU_MASK in self.hv_proposed):
2828
      cpu_list = \
2829
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2830
      # Verify mask is consistent with number of vCPUs. Can skip this
2831
      # test if only 1 entry in the CPU mask, which means same mask
2832
      # is applied to all vCPUs.
2833
      if (len(cpu_list) > 1 and
2834
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2835
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2836
                                   " CPU mask [%s]" %
2837
                                   (self.be_proposed[constants.BE_VCPUS],
2838
                                    self.hv_proposed[constants.HV_CPU_MASK]),
2839
                                   errors.ECODE_INVAL)
2840

    
2841
      # Only perform this test if a new CPU mask is given
2842
      if constants.HV_CPU_MASK in self.hv_new:
2843
        # Calculate the largest CPU number requested
2844
        max_requested_cpu = max(map(max, cpu_list))
2845
        # Check that all of the instance's nodes have enough physical CPUs to
2846
        # satisfy the requested CPU mask
2847
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2848
                                max_requested_cpu + 1, instance.hypervisor)
2849

    
2850
    # osparams processing
2851
    if self.op.osparams:
2852
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2853
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2854
      self.os_inst = i_osdict # the new dict (without defaults)
2855
    else:
2856
      self.os_inst = {}
2857

    
2858
    #TODO(dynmem): do the appropriate check involving MINMEM
2859
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2860
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2861
      mem_check_list = [pnode]
2862
      if be_new[constants.BE_AUTO_BALANCE]:
2863
        # either we changed auto_balance to yes or it was from before
2864
        mem_check_list.extend(instance.secondary_nodes)
2865
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
2866
                                                  instance.hypervisor)
2867
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2868
                                         [instance.hypervisor], False)
2869
      pninfo = nodeinfo[pnode]
2870
      msg = pninfo.fail_msg
2871
      if msg:
2872
        # Assume the primary node is unreachable and go ahead
2873
        self.warn.append("Can't get info from primary node %s: %s" %
2874
                         (pnode, msg))
2875
      else:
2876
        (_, _, (pnhvinfo, )) = pninfo.payload
2877
        if not isinstance(pnhvinfo.get("memory_free", None), int):
2878
          self.warn.append("Node data from primary node %s doesn't contain"
2879
                           " free memory information" % pnode)
2880
        elif instance_info.fail_msg:
2881
          self.warn.append("Can't get instance runtime information: %s" %
2882
                           instance_info.fail_msg)
2883
        else:
2884
          if instance_info.payload:
2885
            current_mem = int(instance_info.payload["memory"])
2886
          else:
2887
            # Assume instance not running
2888
            # (there is a slight race condition here, but it's not very
2889
            # probable, and we have no other way to check)
2890
            # TODO: Describe race condition
2891
            current_mem = 0
2892
          #TODO(dynmem): do the appropriate check involving MINMEM
2893
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2894
                      pnhvinfo["memory_free"])
2895
          if miss_mem > 0:
2896
            raise errors.OpPrereqError("This change will prevent the instance"
2897
                                       " from starting, due to %d MB of memory"
2898
                                       " missing on its primary node" %
2899
                                       miss_mem, errors.ECODE_NORES)
2900

    
2901
      if be_new[constants.BE_AUTO_BALANCE]:
2902
        for node, nres in nodeinfo.items():
2903
          if node not in instance.secondary_nodes:
2904
            continue
2905
          nres.Raise("Can't get info from secondary node %s" % node,
2906
                     prereq=True, ecode=errors.ECODE_STATE)
2907
          (_, _, (nhvinfo, )) = nres.payload
2908
          if not isinstance(nhvinfo.get("memory_free", None), int):
2909
            raise errors.OpPrereqError("Secondary node %s didn't return free"
2910
                                       " memory information" % node,
2911
                                       errors.ECODE_STATE)
2912
          #TODO(dynmem): do the appropriate check involving MINMEM
2913
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2914
            raise errors.OpPrereqError("This change will prevent the instance"
2915
                                       " from failover to its secondary node"
2916
                                       " %s, due to not enough memory" % node,
2917
                                       errors.ECODE_STATE)
2918

    
2919
    if self.op.runtime_mem:
2920
      remote_info = self.rpc.call_instance_info(instance.primary_node,
2921
                                                instance.name,
2922
                                                instance.hypervisor)
2923
      remote_info.Raise("Error checking node %s" % instance.primary_node)
2924
      if not remote_info.payload: # not running already
2925
        raise errors.OpPrereqError("Instance %s is not running" %
2926
                                   instance.name, errors.ECODE_STATE)
2927

    
2928
      current_memory = remote_info.payload["memory"]
2929
      if (not self.op.force and
2930
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2931
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2932
        raise errors.OpPrereqError("Instance %s must have memory between %d"
2933
                                   " and %d MB of memory unless --force is"
2934
                                   " given" %
2935
                                   (instance.name,
2936
                                    self.be_proposed[constants.BE_MINMEM],
2937
                                    self.be_proposed[constants.BE_MAXMEM]),
2938
                                   errors.ECODE_INVAL)
2939

    
2940
      delta = self.op.runtime_mem - current_memory
2941
      if delta > 0:
2942
        CheckNodeFreeMemory(self, instance.primary_node,
2943
                            "ballooning memory for instance %s" %
2944
                            instance.name, delta, instance.hypervisor)
2945

    
2946
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2947
      raise errors.OpPrereqError("Disk operations not supported for"
2948
                                 " diskless instances", errors.ECODE_INVAL)
2949

    
2950
    def _PrepareNicCreate(_, params, private):
2951
      self._PrepareNicModification(params, private, None, None,
2952
                                   {}, cluster, pnode)
2953
      return (None, None)
2954

    
2955
    def _PrepareNicMod(_, nic, params, private):
2956
      self._PrepareNicModification(params, private, nic.ip, nic.network,
2957
                                   nic.nicparams, cluster, pnode)
2958
      return None
2959

    
2960
    def _PrepareNicRemove(_, params, __):
2961
      ip = params.ip
2962
      net = params.network
2963
      if net is not None and ip is not None:
2964
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2965

    
2966
    # Verify NIC changes (operating on copy)
2967
    nics = instance.nics[:]
2968
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
2969
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2970
    if len(nics) > constants.MAX_NICS:
2971
      raise errors.OpPrereqError("Instance has too many network interfaces"
2972
                                 " (%d), cannot add more" % constants.MAX_NICS,
2973
                                 errors.ECODE_STATE)
2974

    
2975
    def _PrepareDiskMod(_, disk, params, __):
2976
      disk.name = params.get(constants.IDISK_NAME, None)
2977

    
2978
    # Verify disk changes (operating on a copy)
2979
    disks = copy.deepcopy(instance.disks)
2980
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2981
                        _PrepareDiskMod, None)
2982
    utils.ValidateDeviceNames("disk", disks)
2983
    if len(disks) > constants.MAX_DISKS:
2984
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2985
                                 " more" % constants.MAX_DISKS,
2986
                                 errors.ECODE_STATE)
2987
    disk_sizes = [disk.size for disk in instance.disks]
2988
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
2989
                      self.diskmod if op == constants.DDM_ADD)
2990
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2991
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2992

    
2993
    if self.op.offline is not None and self.op.offline:
2994
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2995
                         msg="can't change to offline")
2996

    
2997
    # Pre-compute NIC changes (necessary to use result in hooks)
2998
    self._nic_chgdesc = []
2999
    if self.nicmod:
3000
      # Operate on copies as this is still in prereq
3001
      nics = [nic.Copy() for nic in instance.nics]
3002
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3003
                          self._CreateNewNic, self._ApplyNicMods,
3004
                          self._RemoveNic)
3005
      # Verify that NIC names are unique and valid
3006
      utils.ValidateDeviceNames("NIC", nics)
3007
      self._new_nics = nics
3008
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3009
    else:
3010
      self._new_nics = None
3011
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
3012

    
3013
    if not self.op.ignore_ipolicy:
3014
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
3015
                                                              group_info)
3016

    
3017
      # Fill ispec with backend parameters
3018
      ispec[constants.ISPEC_SPINDLE_USE] = \
3019
        self.be_new.get(constants.BE_SPINDLE_USE, None)
3020
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3021
                                                         None)
3022

    
3023
      # Copy ispec to verify parameters with min/max values separately
3024
      if self.op.disk_template:
3025
        new_disk_template = self.op.disk_template
3026
      else:
3027
        new_disk_template = instance.disk_template
3028
      ispec_max = ispec.copy()
3029
      ispec_max[constants.ISPEC_MEM_SIZE] = \
3030
        self.be_new.get(constants.BE_MAXMEM, None)
3031
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3032
                                                     new_disk_template)
3033
      ispec_min = ispec.copy()
3034
      ispec_min[constants.ISPEC_MEM_SIZE] = \
3035
        self.be_new.get(constants.BE_MINMEM, None)
3036
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3037
                                                     new_disk_template)
3038

    
3039
      if (res_max or res_min):
3040
        # FIXME: Improve error message by including information about whether
3041
        # the upper or lower limit of the parameter fails the ipolicy.
3042
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3043
               (group_info, group_info.name,
3044
                utils.CommaJoin(set(res_max + res_min))))
3045
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3046

    
3047
  def _ConvertPlainToDrbd(self, feedback_fn):
3048
    """Converts an instance from plain to drbd.
3049

3050
    """
3051
    feedback_fn("Converting template to drbd")
3052
    instance = self.instance
3053
    pnode = instance.primary_node
3054
    snode = self.op.remote_node
3055

    
3056
    assert instance.disk_template == constants.DT_PLAIN
3057

    
3058
    # create a fake disk info for _GenerateDiskTemplate
3059
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3060
                  constants.IDISK_VG: d.logical_id[0],
3061
                  constants.IDISK_NAME: d.name}
3062
                 for d in instance.disks]
3063
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3064
                                     instance.name, pnode, [snode],
3065
                                     disk_info, None, None, 0, feedback_fn,
3066
                                     self.diskparams)
3067
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3068
                                        self.diskparams)
3069
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
3070
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
3071
    info = GetInstanceInfoText(instance)
3072
    feedback_fn("Creating additional volumes...")
3073
    # first, create the missing data and meta devices
3074
    for disk in anno_disks:
3075
      # unfortunately this is... not too nice
3076
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
3077
                           info, True, p_excl_stor)
3078
      for child in disk.children:
3079
        CreateSingleBlockDev(self, snode, instance, child, info, True,
3080
                             s_excl_stor)
3081
    # at this stage, all new LVs have been created, we can rename the
3082
    # old ones
3083
    feedback_fn("Renaming original volumes...")
3084
    rename_list = [(o, n.children[0].logical_id)
3085
                   for (o, n) in zip(instance.disks, new_disks)]
3086
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
3087
    result.Raise("Failed to rename original LVs")
3088

    
3089
    feedback_fn("Initializing DRBD devices...")
3090
    # all child devices are in place, we can now create the DRBD devices
3091
    try:
3092
      for disk in anno_disks:
3093
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3094
          f_create = node == pnode
3095
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3096
                               excl_stor)
3097
    except errors.GenericError, e:
3098
      feedback_fn("Initializing of DRBD devices failed;"
3099
                  " renaming back original volumes...")
3100
      for disk in new_disks:
3101
        self.cfg.SetDiskID(disk, pnode)
3102
      rename_back_list = [(n.children[0], o.logical_id)
3103
                          for (n, o) in zip(new_disks, instance.disks)]
3104
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3105
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3106
      raise
3107

    
3108
    # at this point, the instance has been modified
3109
    instance.disk_template = constants.DT_DRBD8
3110
    instance.disks = new_disks
3111
    self.cfg.Update(instance, feedback_fn)
3112

    
3113
    # Release node locks while waiting for sync
3114
    ReleaseLocks(self, locking.LEVEL_NODE)
3115

    
3116
    # disks are created, waiting for sync
3117
    disk_abort = not WaitForSync(self, instance,
3118
                                 oneshot=not self.op.wait_for_sync)
3119
    if disk_abort:
3120
      raise errors.OpExecError("There are some degraded disks for"
3121
                               " this instance, please cleanup manually")
3122

    
3123
    # Node resource locks will be released by caller
3124

    
3125
  def _ConvertDrbdToPlain(self, feedback_fn):
3126
    """Converts an instance from drbd to plain.
3127

3128
    """
3129
    instance = self.instance
3130

    
3131
    assert len(instance.secondary_nodes) == 1
3132
    assert instance.disk_template == constants.DT_DRBD8
3133

    
3134
    pnode = instance.primary_node
3135
    snode = instance.secondary_nodes[0]
3136
    feedback_fn("Converting template to plain")
3137

    
3138
    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3139
    new_disks = [d.children[0] for d in instance.disks]
3140

    
3141
    # copy over size, mode and name
3142
    for parent, child in zip(old_disks, new_disks):
3143
      child.size = parent.size
3144
      child.mode = parent.mode
3145
      child.name = parent.name
3146

    
3147
    # this is a DRBD disk, return its port to the pool
3148
    # NOTE: this must be done right before the call to cfg.Update!
3149
    for disk in old_disks:
3150
      tcp_port = disk.logical_id[2]
3151
      self.cfg.AddTcpUdpPort(tcp_port)
3152

    
3153
    # update instance structure
3154
    instance.disks = new_disks
3155
    instance.disk_template = constants.DT_PLAIN
3156
    _UpdateIvNames(0, instance.disks)
3157
    self.cfg.Update(instance, feedback_fn)
3158

    
3159
    # Release locks in case removing disks takes a while
3160
    ReleaseLocks(self, locking.LEVEL_NODE)
3161

    
3162
    feedback_fn("Removing volumes on the secondary node...")
3163
    for disk in old_disks:
3164
      self.cfg.SetDiskID(disk, snode)
3165
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3166
      if msg:
3167
        self.LogWarning("Could not remove block device %s on node %s,"
3168
                        " continuing anyway: %s", disk.iv_name, snode, msg)
3169

    
3170
    feedback_fn("Removing unneeded volumes on the primary node...")
3171
    for idx, disk in enumerate(old_disks):
3172
      meta = disk.children[1]
3173
      self.cfg.SetDiskID(meta, pnode)
3174
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3175
      if msg:
3176
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
3177
                        " continuing anyway: %s", idx, pnode, msg)
3178

    
3179
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
3180
    self.LogInfo("Trying to hotplug device...")
3181
    msg = "hotplug:"
3182
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
3183
                                          self.instance, action, dev_type,
3184
                                          (device, self.instance),
3185
                                          extra, seq)
3186
    if result.fail_msg:
3187
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3188
      self.LogInfo("Continuing execution..")
3189
      msg += "failed"
3190
    else:
3191
      self.LogInfo("Hotplug done.")
3192
      msg += "done"
3193
    return msg
3194

    
3195
  def _CreateNewDisk(self, idx, params, _):
3196
    """Creates a new disk.
3197

3198
    """
3199
    instance = self.instance
3200

    
3201
    # add a new disk
3202
    if instance.disk_template in constants.DTS_FILEBASED:
3203
      (file_driver, file_path) = instance.disks[0].logical_id
3204
      file_path = os.path.dirname(file_path)
3205
    else:
3206
      file_driver = file_path = None
3207

    
3208
    disk = \
3209
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
3210
                           instance.primary_node, instance.secondary_nodes,
3211
                           [params], file_path, file_driver, idx,
3212
                           self.Log, self.diskparams)[0]
3213

    
3214
    new_disks = CreateDisks(self, instance, disks=[disk])
3215

    
3216
    if self.cluster.prealloc_wipe_disks:
3217
      # Wipe new disk
3218
      WipeOrCleanupDisks(self, instance,
3219
                         disks=[(idx, disk, 0)],
3220
                         cleanup=new_disks)
3221

    
3222
    changes = [
3223
      ("disk/%d" % idx,
3224
      "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3225
      ]
3226
    if self.op.hotplug:
3227
      self.cfg.SetDiskID(disk, self.instance.primary_node)
3228
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3229
                                               (disk, self.instance),
3230
                                               self.instance.name, True, idx)
3231
      if result.fail_msg:
3232
        changes.append(("disk/%d" % idx, "assemble:failed"))
3233
        self.LogWarning("Can't assemble newly created disk %d: %s",
3234
                        idx, result.fail_msg)
3235
      else:
3236
        _, link_name = result.payload
3237
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3238
                                  constants.HOTPLUG_TARGET_DISK,
3239
                                  disk, link_name, idx)
3240
        changes.append(("disk/%d" % idx, msg))
3241

    
3242
    return (disk, changes)
3243

    
3244
  def _ModifyDisk(self, idx, disk, params, _):
3245
    """Modifies a disk.
3246

3247
    """
3248
    changes = []
3249
    if constants.IDISK_MODE in params:
3250
      disk.mode = params.get(constants.IDISK_MODE)
3251
      changes.append(("disk.mode/%d" % idx, disk.mode))
3252

    
3253
    if constants.IDISK_NAME in params:
3254
      disk.name = params.get(constants.IDISK_NAME)
3255
      changes.append(("disk.name/%d" % idx, disk.name))
3256

    
3257
    # Modify arbitrary params in case instance template is ext
3258
    for key, value in params.iteritems():
3259
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
3260
          self.instance.disk_template == constants.DT_EXT):
3261
        # stolen from GetUpdatedParams: default means reset/delete
3262
        if value.lower() == constants.VALUE_DEFAULT:
3263
          try:
3264
            del disk.params[key]
3265
          except KeyError:
3266
            pass
3267
        else:
3268
          disk.params[key] = value
3269
        changes.append(("disk.params:%s/%d" % (key, idx), value))
3270

    
3271
    return changes
3272

    
3273
  def _RemoveDisk(self, idx, root, _):
3274
    """Removes a disk.
3275

3276
    """
3277
    hotmsg = ""
3278
    if self.op.hotplug:
3279
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3280
                                   constants.HOTPLUG_TARGET_DISK,
3281
                                   root, None, idx)
3282
      ShutdownInstanceDisks(self, self.instance, [root])
3283

    
3284
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3285
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3286
      self.cfg.SetDiskID(disk, node)
3287
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3288
      if msg:
3289
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3290
                        " continuing anyway", idx, node, msg)
3291

    
3292
    # if this is a DRBD disk, return its port to the pool
3293
    if root.dev_type in constants.LDS_DRBD:
3294
      self.cfg.AddTcpUdpPort(root.logical_id[2])
3295

    
3296
    return hotmsg
3297

    
3298
  def _CreateNewNic(self, idx, params, private):
3299
    """Creates data structure for a new network interface.
3300

3301
    """
3302
    mac = params[constants.INIC_MAC]
3303
    ip = params.get(constants.INIC_IP, None)
3304
    net = params.get(constants.INIC_NETWORK, None)
3305
    name = params.get(constants.INIC_NAME, None)
3306
    net_uuid = self.cfg.LookupNetwork(net)
3307
    #TODO: not private.filled?? can a nic have no nicparams??
3308
    nicparams = private.filled
3309
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3310
                       nicparams=nicparams)
3311
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3312

    
3313
    changes = [
3314
      ("nic.%d" % idx,
3315
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3316
       (mac, ip, private.filled[constants.NIC_MODE],
3317
       private.filled[constants.NIC_LINK], net)),
3318
      ]
3319

    
3320
    if self.op.hotplug:
3321
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3322
                                constants.HOTPLUG_TARGET_NIC,
3323
                                nobj, None, idx)
3324
      changes.append(("nic.%d" % idx, msg))
3325

    
3326
    return (nobj, changes)
3327

    
3328
  def _ApplyNicMods(self, idx, nic, params, private):
3329
    """Modifies a network interface.
3330

3331
    """
3332
    changes = []
3333

    
3334
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3335
      if key in params:
3336
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3337
        setattr(nic, key, params[key])
3338

    
3339
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3340
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3341
    if new_net_uuid != nic.network:
3342
      changes.append(("nic.network/%d" % idx, new_net))
3343
      nic.network = new_net_uuid
3344

    
3345
    if private.filled:
3346
      nic.nicparams = private.filled
3347

    
3348
      for (key, val) in nic.nicparams.items():
3349
        changes.append(("nic.%s/%d" % (key, idx), val))
3350

    
3351
    if self.op.hotplug:
3352
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3353
                                constants.HOTPLUG_TARGET_NIC,
3354
                                nic, None, idx)
3355
      changes.append(("nic/%d" % idx, msg))
3356

    
3357
    return changes
3358

    
3359
  def _RemoveNic(self, idx, nic, _):
3360
    if self.op.hotplug:
3361
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3362
                                 constants.HOTPLUG_TARGET_NIC,
3363
                                 nic, None, idx)
3364

    
3365
  def Exec(self, feedback_fn):
3366
    """Modifies an instance.
3367

3368
    All parameters take effect only at the next restart of the instance.
3369

3370
    """
3371
    # Process here the warnings from CheckPrereq, as we don't have a
3372
    # feedback_fn there.
3373
    # TODO: Replace with self.LogWarning
3374
    for warn in self.warn:
3375
      feedback_fn("WARNING: %s" % warn)
3376

    
3377
    assert ((self.op.disk_template is None) ^
3378
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3379
      "Not owning any node resource locks"
3380

    
3381
    result = []
3382
    instance = self.instance
3383

    
3384
    # New primary node
3385
    if self.op.pnode:
3386
      instance.primary_node = self.op.pnode
3387

    
3388
    # runtime memory
3389
    if self.op.runtime_mem:
3390
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3391
                                                     instance,
3392
                                                     self.op.runtime_mem)
3393
      rpcres.Raise("Cannot modify instance runtime memory")
3394
      result.append(("runtime_memory", self.op.runtime_mem))
3395

    
3396
    # Apply disk changes
3397
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3398
                        self._CreateNewDisk, self._ModifyDisk,
3399
                        self._RemoveDisk)
3400
    _UpdateIvNames(0, instance.disks)
3401

    
3402
    if self.op.disk_template:
3403
      if __debug__:
3404
        check_nodes = set(instance.all_nodes)
3405
        if self.op.remote_node:
3406
          check_nodes.add(self.op.remote_node)
3407
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3408
          owned = self.owned_locks(level)
3409
          assert not (check_nodes - owned), \
3410
            ("Not owning the correct locks, owning %r, expected at least %r" %
3411
             (owned, check_nodes))
3412

    
3413
      r_shut = ShutdownInstanceDisks(self, instance)
3414
      if not r_shut:
3415
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3416
                                 " proceed with disk template conversion")
3417
      mode = (instance.disk_template, self.op.disk_template)
3418
      try:
3419
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
3420
      except:
3421
        self.cfg.ReleaseDRBDMinors(instance.name)
3422
        raise
3423
      result.append(("disk_template", self.op.disk_template))
3424

    
3425
      assert instance.disk_template == self.op.disk_template, \
3426
        ("Expected disk template '%s', found '%s'" %
3427
         (self.op.disk_template, instance.disk_template))
3428

    
3429
    # Release node and resource locks if there are any (they might already have
3430
    # been released during disk conversion)
3431
    ReleaseLocks(self, locking.LEVEL_NODE)
3432
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
3433

    
3434
    # Apply NIC changes
3435
    if self._new_nics is not None:
3436
      instance.nics = self._new_nics
3437
      result.extend(self._nic_chgdesc)
3438

    
3439
    # hvparams changes
3440
    if self.op.hvparams:
3441
      instance.hvparams = self.hv_inst
3442
      for key, val in self.op.hvparams.iteritems():
3443
        result.append(("hv/%s" % key, val))
3444

    
3445
    # beparams changes
3446
    if self.op.beparams:
3447
      instance.beparams = self.be_inst
3448
      for key, val in self.op.beparams.iteritems():
3449
        result.append(("be/%s" % key, val))
3450

    
3451
    # OS change
3452
    if self.op.os_name:
3453
      instance.os = self.op.os_name
3454

    
3455
    # osparams changes
3456
    if self.op.osparams:
3457
      instance.osparams = self.os_inst
3458
      for key, val in self.op.osparams.iteritems():
3459
        result.append(("os/%s" % key, val))
3460

    
3461
    if self.op.offline is None:
3462
      # Ignore
3463
      pass
3464
    elif self.op.offline:
3465
      # Mark instance as offline
3466
      self.cfg.MarkInstanceOffline(instance.name)
3467
      result.append(("admin_state", constants.ADMINST_OFFLINE))
3468
    else:
3469
      # Mark instance as online, but stopped
3470
      self.cfg.MarkInstanceDown(instance.name)
3471
      result.append(("admin_state", constants.ADMINST_DOWN))
3472

    
3473
    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3474

    
3475
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3476
                self.owned_locks(locking.LEVEL_NODE)), \
3477
      "All node locks should have been released by now"
3478

    
3479
    return result
3480

    
3481
  _DISK_CONVERSIONS = {
3482
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3483
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3484
    }
3485

    
3486

    
3487
class LUInstanceChangeGroup(LogicalUnit):
3488
  HPATH = "instance-change-group"
3489
  HTYPE = constants.HTYPE_INSTANCE
3490
  REQ_BGL = False
3491

    
3492
  def ExpandNames(self):
3493
    self.share_locks = ShareAll()
3494

    
3495
    self.needed_locks = {
3496
      locking.LEVEL_NODEGROUP: [],
3497
      locking.LEVEL_NODE: [],
3498
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3499
      }
3500

    
3501
    self._ExpandAndLockInstance()
3502

    
3503
    if self.op.target_groups:
3504
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3505
                                  self.op.target_groups)
3506
    else:
3507
      self.req_target_uuids = None
3508

    
3509
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3510

    
3511
  def DeclareLocks(self, level):
3512
    if level == locking.LEVEL_NODEGROUP:
3513
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3514

    
3515
      if self.req_target_uuids:
3516
        lock_groups = set(self.req_target_uuids)
3517

    
3518
        # Lock all groups used by instance optimistically; this requires going
3519
        # via the node before it's locked, requiring verification later on
3520
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3521
        lock_groups.update(instance_groups)
3522
      else:
3523
        # No target groups, need to lock all of them
3524
        lock_groups = locking.ALL_SET
3525

    
3526
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3527

    
3528
    elif level == locking.LEVEL_NODE:
3529
      if self.req_target_uuids:
3530
        # Lock all nodes used by instances
3531
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3532
        self._LockInstancesNodes()
3533

    
3534
        # Lock all nodes in all potential target groups
3535
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3536
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3537
        member_nodes = [node_name
3538
                        for group in lock_groups
3539
                        for node_name in self.cfg.GetNodeGroup(group).members]
3540
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3541
      else:
3542
        # Lock all nodes as all groups are potential targets
3543
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3544

    
3545
  def CheckPrereq(self):
3546
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3547
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3548
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3549

    
3550
    assert (self.req_target_uuids is None or
3551
            owned_groups.issuperset(self.req_target_uuids))
3552
    assert owned_instances == set([self.op.instance_name])
3553

    
3554
    # Get instance information
3555
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3556

    
3557
    # Check if node groups for locked instance are still correct
3558
    assert owned_nodes.issuperset(self.instance.all_nodes), \
3559
      ("Instance %s's nodes changed while we kept the lock" %
3560
       self.op.instance_name)
3561

    
3562
    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3563
                                          owned_groups)
3564

    
3565
    if self.req_target_uuids:
3566
      # User requested specific target groups
3567
      self.target_uuids = frozenset(self.req_target_uuids)
3568
    else:
3569
      # All groups except those used by the instance are potential targets
3570
      self.target_uuids = owned_groups - inst_groups
3571

    
3572
    conflicting_groups = self.target_uuids & inst_groups
3573
    if conflicting_groups:
3574
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3575
                                 " used by the instance '%s'" %
3576
                                 (utils.CommaJoin(conflicting_groups),
3577
                                  self.op.instance_name),
3578
                                 errors.ECODE_INVAL)
3579

    
3580
    if not self.target_uuids:
3581
      raise errors.OpPrereqError("There are no possible target groups",
3582
                                 errors.ECODE_INVAL)
3583

    
3584
  def BuildHooksEnv(self):
3585
    """Build hooks env.
3586

3587
    """
3588
    assert self.target_uuids
3589

    
3590
    env = {
3591
      "TARGET_GROUPS": " ".join(self.target_uuids),
3592
      }
3593

    
3594
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
3595

    
3596
    return env
3597

    
3598
  def BuildHooksNodes(self):
3599
    """Build hooks nodes.
3600

3601
    """
3602
    mn = self.cfg.GetMasterNode()
3603
    return ([mn], [mn])
3604

    
3605
  def Exec(self, feedback_fn):
3606
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3607

    
3608
    assert instances == [self.op.instance_name], "Instance not locked"
3609

    
3610
    req = iallocator.IAReqGroupChange(instances=instances,
3611
                                      target_groups=list(self.target_uuids))
3612
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3613

    
3614
    ial.Run(self.op.iallocator)
3615

    
3616
    if not ial.success:
3617
      raise errors.OpPrereqError("Can't compute solution for changing group of"
3618
                                 " instance '%s' using iallocator '%s': %s" %
3619
                                 (self.op.instance_name, self.op.iallocator,
3620
                                  ial.info), errors.ECODE_NORES)
3621

    
3622
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3623

    
3624
    self.LogInfo("Iallocator returned %s job(s) for changing group of"
3625
                 " instance '%s'", len(jobs), self.op.instance_name)
3626

    
3627
    return ResultWithJobs(jobs)