Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / instance.py @ 7a8d0d76

History | View | Annotate | Download (138.6 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with instances."""
23

    
24
import OpenSSL
25
import copy
26
import logging
27
import os
28

    
29
from ganeti import compat
30
from ganeti import constants
31
from ganeti import errors
32
from ganeti import ht
33
from ganeti import hypervisor
34
from ganeti import locking
35
from ganeti.masterd import iallocator
36
from ganeti import masterd
37
from ganeti import netutils
38
from ganeti import objects
39
from ganeti import opcodes
40
from ganeti import pathutils
41
from ganeti import rpc
42
from ganeti import utils
43

    
44
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45

    
46
from ganeti.cmdlib.common import INSTANCE_DOWN, \
47
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
53
from ganeti.cmdlib.instance_storage import CreateDisks, \
54
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
56
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
58
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
59
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
60
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
61
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
62
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
63
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
64

    
65
import ganeti.masterd.instance
66

    
67

    
68
#: Type description for changes as returned by L{_ApplyContainerMods}'s
69
#: callbacks
70
_TApplyContModsCbChanges = \
71
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
72
    ht.TNonEmptyString,
73
    ht.TAny,
74
    ])))
75

    
76

    
77
def _CheckHostnameSane(lu, name):
78
  """Ensures that a given hostname resolves to a 'sane' name.
79

80
  The given name is required to be a prefix of the resolved hostname,
81
  to prevent accidental mismatches.
82

83
  @param lu: the logical unit on behalf of which we're checking
84
  @param name: the name we should resolve and check
85
  @return: the resolved hostname object
86

87
  """
88
  hostname = netutils.GetHostname(name=name)
89
  if hostname.name != name:
90
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
91
  if not utils.MatchNameComponent(name, [hostname.name]):
92
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
93
                                " same as given hostname '%s'") %
94
                               (hostname.name, name), errors.ECODE_INVAL)
95
  return hostname
96

    
97

    
98
def _CheckOpportunisticLocking(op):
99
  """Generate error if opportunistic locking is not possible.
100

101
  """
102
  if op.opportunistic_locking and not op.iallocator:
103
    raise errors.OpPrereqError("Opportunistic locking is only available in"
104
                               " combination with an instance allocator",
105
                               errors.ECODE_INVAL)
106

    
107

    
108
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
109
  """Wrapper around IAReqInstanceAlloc.
110

111
  @param op: The instance opcode
112
  @param disks: The computed disks
113
  @param nics: The computed nics
114
  @param beparams: The full filled beparams
115
  @param node_whitelist: List of nodes which should appear as online to the
116
    allocator (unless the node is already marked offline)
117

118
  @returns: A filled L{iallocator.IAReqInstanceAlloc}
119

120
  """
121
  spindle_use = beparams[constants.BE_SPINDLE_USE]
122
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
123
                                       disk_template=op.disk_template,
124
                                       tags=op.tags,
125
                                       os=op.os_type,
126
                                       vcpus=beparams[constants.BE_VCPUS],
127
                                       memory=beparams[constants.BE_MAXMEM],
128
                                       spindle_use=spindle_use,
129
                                       disks=disks,
130
                                       nics=[n.ToDict() for n in nics],
131
                                       hypervisor=op.hypervisor,
132
                                       node_whitelist=node_whitelist)
133

    
134

    
135
def _ComputeFullBeParams(op, cluster):
136
  """Computes the full beparams.
137

138
  @param op: The instance opcode
139
  @param cluster: The cluster config object
140

141
  @return: The fully filled beparams
142

143
  """
144
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
145
  for param, value in op.beparams.iteritems():
146
    if value == constants.VALUE_AUTO:
147
      op.beparams[param] = default_beparams[param]
148
  objects.UpgradeBeParams(op.beparams)
149
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
150
  return cluster.SimpleFillBE(op.beparams)
151

    
152

    
153
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
154
  """Computes the nics.
155

156
  @param op: The instance opcode
157
  @param cluster: Cluster configuration object
158
  @param default_ip: The default ip to assign
159
  @param cfg: An instance of the configuration object
160
  @param ec_id: Execution context ID
161

162
  @returns: The build up nics
163

164
  """
165
  nics = []
166
  for nic in op.nics:
167
    nic_mode_req = nic.get(constants.INIC_MODE, None)
168
    nic_mode = nic_mode_req
169
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
170
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
171

    
172
    net = nic.get(constants.INIC_NETWORK, None)
173
    link = nic.get(constants.NIC_LINK, None)
174
    ip = nic.get(constants.INIC_IP, None)
175

    
176
    if net is None or net.lower() == constants.VALUE_NONE:
177
      net = None
178
    else:
179
      if nic_mode_req is not None or link is not None:
180
        raise errors.OpPrereqError("If network is given, no mode or link"
181
                                   " is allowed to be passed",
182
                                   errors.ECODE_INVAL)
183

    
184
    # ip validity checks
185
    if ip is None or ip.lower() == constants.VALUE_NONE:
186
      nic_ip = None
187
    elif ip.lower() == constants.VALUE_AUTO:
188
      if not op.name_check:
189
        raise errors.OpPrereqError("IP address set to auto but name checks"
190
                                   " have been skipped",
191
                                   errors.ECODE_INVAL)
192
      nic_ip = default_ip
193
    else:
194
      # We defer pool operations until later, so that the iallocator has
195
      # filled in the instance's node(s) dimara
196
      if ip.lower() == constants.NIC_IP_POOL:
197
        if net is None:
198
          raise errors.OpPrereqError("if ip=pool, parameter network"
199
                                     " must be passed too",
200
                                     errors.ECODE_INVAL)
201

    
202
      elif not netutils.IPAddress.IsValid(ip):
203
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
204
                                   errors.ECODE_INVAL)
205

    
206
      nic_ip = ip
207

    
208
    # TODO: check the ip address for uniqueness
209
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
210
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
211
                                 errors.ECODE_INVAL)
212

    
213
    # MAC address verification
214
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
215
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
216
      mac = utils.NormalizeAndValidateMac(mac)
217

    
218
      try:
219
        # TODO: We need to factor this out
220
        cfg.ReserveMAC(mac, ec_id)
221
      except errors.ReservationError:
222
        raise errors.OpPrereqError("MAC address %s already in use"
223
                                   " in cluster" % mac,
224
                                   errors.ECODE_NOTUNIQUE)
225

    
226
    #  Build nic parameters
227
    nicparams = {}
228
    if nic_mode_req:
229
      nicparams[constants.NIC_MODE] = nic_mode
230
    if link:
231
      nicparams[constants.NIC_LINK] = link
232

    
233
    check_params = cluster.SimpleFillNIC(nicparams)
234
    objects.NIC.CheckParameterSyntax(check_params)
235
    net_uuid = cfg.LookupNetwork(net)
236
    name = nic.get(constants.INIC_NAME, None)
237
    if name is not None and name.lower() == constants.VALUE_NONE:
238
      name = None
239
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
240
                          network=net_uuid, nicparams=nicparams)
241
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
242
    nics.append(nic_obj)
243

    
244
  return nics
245

    
246

    
247
def _CheckForConflictingIp(lu, ip, node):
248
  """In case of conflicting IP address raise error.
249

250
  @type ip: string
251
  @param ip: IP address
252
  @type node: string
253
  @param node: node name
254

255
  """
256
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
257
  if conf_net is not None:
258
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
259
                                " network %s, but the target NIC does not." %
260
                                (ip, conf_net)),
261
                               errors.ECODE_STATE)
262

    
263
  return (None, None)
264

    
265

    
266
def _ComputeIPolicyInstanceSpecViolation(
267
  ipolicy, instance_spec, disk_template,
268
  _compute_fn=ComputeIPolicySpecViolation):
269
  """Compute if instance specs meets the specs of ipolicy.
270

271
  @type ipolicy: dict
272
  @param ipolicy: The ipolicy to verify against
273
  @param instance_spec: dict
274
  @param instance_spec: The instance spec to verify
275
  @type disk_template: string
276
  @param disk_template: the disk template of the instance
277
  @param _compute_fn: The function to verify ipolicy (unittest only)
278
  @see: L{ComputeIPolicySpecViolation}
279

280
  """
281
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
282
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
283
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
284
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
285
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
286
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
287

    
288
  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
289
                     disk_sizes, spindle_use, disk_template)
290

    
291

    
292
def _CheckOSVariant(os_obj, name):
293
  """Check whether an OS name conforms to the os variants specification.
294

295
  @type os_obj: L{objects.OS}
296
  @param os_obj: OS object to check
297
  @type name: string
298
  @param name: OS name passed by the user, to check for validity
299

300
  """
301
  variant = objects.OS.GetVariant(name)
302
  if not os_obj.supported_variants:
303
    if variant:
304
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
305
                                 " passed)" % (os_obj.name, variant),
306
                                 errors.ECODE_INVAL)
307
    return
308
  if not variant:
309
    raise errors.OpPrereqError("OS name must include a variant",
310
                               errors.ECODE_INVAL)
311

    
312
  if variant not in os_obj.supported_variants:
313
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
314

    
315

    
316
class LUInstanceCreate(LogicalUnit):
317
  """Create an instance.
318

319
  """
320
  HPATH = "instance-add"
321
  HTYPE = constants.HTYPE_INSTANCE
322
  REQ_BGL = False
323

    
324
  def CheckArguments(self):
325
    """Check arguments.
326

327
    """
328
    # do not require name_check to ease forward/backward compatibility
329
    # for tools
330
    if self.op.no_install and self.op.start:
331
      self.LogInfo("No-installation mode selected, disabling startup")
332
      self.op.start = False
333
    # validate/normalize the instance name
334
    self.op.instance_name = \
335
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
336

    
337
    if self.op.ip_check and not self.op.name_check:
338
      # TODO: make the ip check more flexible and not depend on the name check
339
      raise errors.OpPrereqError("Cannot do IP address check without a name"
340
                                 " check", errors.ECODE_INVAL)
341

    
342
    # check nics' parameter names
343
    for nic in self.op.nics:
344
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
345
    # check that NIC's parameters names are unique and valid
346
    utils.ValidateDeviceNames("NIC", self.op.nics)
347

    
348
    # check that disk's names are unique and valid
349
    utils.ValidateDeviceNames("disk", self.op.disks)
350

    
351
    cluster = self.cfg.GetClusterInfo()
352
    if not self.op.disk_template in cluster.enabled_disk_templates:
353
      raise errors.OpPrereqError("Cannot create an instance with disk template"
354
                                 " '%s', because it is not enabled in the"
355
                                 " cluster. Enabled disk templates are: %s." %
356
                                 (self.op.disk_template,
357
                                  ",".join(cluster.enabled_disk_templates)))
358

    
359
    # check disks. parameter names and consistent adopt/no-adopt strategy
360
    has_adopt = has_no_adopt = False
361
    for disk in self.op.disks:
362
      if self.op.disk_template != constants.DT_EXT:
363
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
364
      if constants.IDISK_ADOPT in disk:
365
        has_adopt = True
366
      else:
367
        has_no_adopt = True
368
    if has_adopt and has_no_adopt:
369
      raise errors.OpPrereqError("Either all disks are adopted or none is",
370
                                 errors.ECODE_INVAL)
371
    if has_adopt:
372
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
373
        raise errors.OpPrereqError("Disk adoption is not supported for the"
374
                                   " '%s' disk template" %
375
                                   self.op.disk_template,
376
                                   errors.ECODE_INVAL)
377
      if self.op.iallocator is not None:
378
        raise errors.OpPrereqError("Disk adoption not allowed with an"
379
                                   " iallocator script", errors.ECODE_INVAL)
380
      if self.op.mode == constants.INSTANCE_IMPORT:
381
        raise errors.OpPrereqError("Disk adoption not allowed for"
382
                                   " instance import", errors.ECODE_INVAL)
383
    else:
384
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
385
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
386
                                   " but no 'adopt' parameter given" %
387
                                   self.op.disk_template,
388
                                   errors.ECODE_INVAL)
389

    
390
    self.adopt_disks = has_adopt
391

    
392
    # instance name verification
393
    if self.op.name_check:
394
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
395
      self.op.instance_name = self.hostname1.name
396
      # used in CheckPrereq for ip ping check
397
      self.check_ip = self.hostname1.ip
398
    else:
399
      self.check_ip = None
400

    
401
    # file storage checks
402
    if (self.op.file_driver and
403
        not self.op.file_driver in constants.FILE_DRIVER):
404
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
405
                                 self.op.file_driver, errors.ECODE_INVAL)
406

    
407
    # set default file_driver if unset and required
408
    if (not self.op.file_driver and
409
        self.op.disk_template in [constants.DT_FILE,
410
                                  constants.DT_SHARED_FILE]):
411
      self.op.file_driver = constants.FD_LOOP
412

    
413
    if self.op.disk_template == constants.DT_FILE:
414
      opcodes.RequireFileStorage()
415
    elif self.op.disk_template == constants.DT_SHARED_FILE:
416
      opcodes.RequireSharedFileStorage()
417

    
418
    ### Node/iallocator related checks
419
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
420

    
421
    if self.op.pnode is not None:
422
      if self.op.disk_template in constants.DTS_INT_MIRROR:
423
        if self.op.snode is None:
424
          raise errors.OpPrereqError("The networked disk templates need"
425
                                     " a mirror node", errors.ECODE_INVAL)
426
      elif self.op.snode:
427
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
428
                        " template")
429
        self.op.snode = None
430

    
431
    _CheckOpportunisticLocking(self.op)
432

    
433
    self._cds = GetClusterDomainSecret()
434

    
435
    if self.op.mode == constants.INSTANCE_IMPORT:
436
      # On import force_variant must be True, because if we forced it at
437
      # initial install, our only chance when importing it back is that it
438
      # works again!
439
      self.op.force_variant = True
440

    
441
      if self.op.no_install:
442
        self.LogInfo("No-installation mode has no effect during import")
443

    
444
    elif self.op.mode == constants.INSTANCE_CREATE:
445
      if self.op.os_type is None:
446
        raise errors.OpPrereqError("No guest OS specified",
447
                                   errors.ECODE_INVAL)
448
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
449
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
450
                                   " installation" % self.op.os_type,
451
                                   errors.ECODE_STATE)
452
      if self.op.disk_template is None:
453
        raise errors.OpPrereqError("No disk template specified",
454
                                   errors.ECODE_INVAL)
455

    
456
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
457
      # Check handshake to ensure both clusters have the same domain secret
458
      src_handshake = self.op.source_handshake
459
      if not src_handshake:
460
        raise errors.OpPrereqError("Missing source handshake",
461
                                   errors.ECODE_INVAL)
462

    
463
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
464
                                                           src_handshake)
465
      if errmsg:
466
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
467
                                   errors.ECODE_INVAL)
468

    
469
      # Load and check source CA
470
      self.source_x509_ca_pem = self.op.source_x509_ca
471
      if not self.source_x509_ca_pem:
472
        raise errors.OpPrereqError("Missing source X509 CA",
473
                                   errors.ECODE_INVAL)
474

    
475
      try:
476
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
477
                                                    self._cds)
478
      except OpenSSL.crypto.Error, err:
479
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
480
                                   (err, ), errors.ECODE_INVAL)
481

    
482
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
483
      if errcode is not None:
484
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
485
                                   errors.ECODE_INVAL)
486

    
487
      self.source_x509_ca = cert
488

    
489
      src_instance_name = self.op.source_instance_name
490
      if not src_instance_name:
491
        raise errors.OpPrereqError("Missing source instance name",
492
                                   errors.ECODE_INVAL)
493

    
494
      self.source_instance_name = \
495
        netutils.GetHostname(name=src_instance_name).name
496

    
497
    else:
498
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
499
                                 self.op.mode, errors.ECODE_INVAL)
500

    
501
  def ExpandNames(self):
502
    """ExpandNames for CreateInstance.
503

504
    Figure out the right locks for instance creation.
505

506
    """
507
    self.needed_locks = {}
508

    
509
    instance_name = self.op.instance_name
510
    # this is just a preventive check, but someone might still add this
511
    # instance in the meantime, and creation will fail at lock-add time
512
    if instance_name in self.cfg.GetInstanceList():
513
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
514
                                 instance_name, errors.ECODE_EXISTS)
515

    
516
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
517

    
518
    if self.op.iallocator:
519
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
520
      # specifying a group on instance creation and then selecting nodes from
521
      # that group
522
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
523
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
524

    
525
      if self.op.opportunistic_locking:
526
        self.opportunistic_locks[locking.LEVEL_NODE] = True
527
    else:
528
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
529
      nodelist = [self.op.pnode]
530
      if self.op.snode is not None:
531
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
532
        nodelist.append(self.op.snode)
533
      self.needed_locks[locking.LEVEL_NODE] = nodelist
534

    
535
    # in case of import lock the source node too
536
    if self.op.mode == constants.INSTANCE_IMPORT:
537
      src_node = self.op.src_node
538
      src_path = self.op.src_path
539

    
540
      if src_path is None:
541
        self.op.src_path = src_path = self.op.instance_name
542

    
543
      if src_node is None:
544
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
545
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
546
        self.op.src_node = None
547
        if os.path.isabs(src_path):
548
          raise errors.OpPrereqError("Importing an instance from a path"
549
                                     " requires a source node option",
550
                                     errors.ECODE_INVAL)
551
      else:
552
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
553
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
554
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
555
        if not os.path.isabs(src_path):
556
          self.op.src_path = src_path = \
557
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
558

    
559
    self.needed_locks[locking.LEVEL_NODE_RES] = \
560
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
561

    
562
  def DeclareLocks(self, level):
563
    if level == locking.LEVEL_NODE_RES and \
564
      self.opportunistic_locks[locking.LEVEL_NODE]:
565
      # Even when using opportunistic locking, we require the same set of
566
      # NODE_RES locks as we got NODE locks
567
      self.needed_locks[locking.LEVEL_NODE_RES] = \
568
        self.owned_locks(locking.LEVEL_NODE)
569

    
570
  def _RunAllocator(self):
571
    """Run the allocator based on input opcode.
572

573
    """
574
    if self.op.opportunistic_locking:
575
      # Only consider nodes for which a lock is held
576
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
577
    else:
578
      node_whitelist = None
579

    
580
    #TODO Export network to iallocator so that it chooses a pnode
581
    #     in a nodegroup that has the desired network connected to
582
    req = _CreateInstanceAllocRequest(self.op, self.disks,
583
                                      self.nics, self.be_full,
584
                                      node_whitelist)
585
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
586

    
587
    ial.Run(self.op.iallocator)
588

    
589
    if not ial.success:
590
      # When opportunistic locks are used only a temporary failure is generated
591
      if self.op.opportunistic_locking:
592
        ecode = errors.ECODE_TEMP_NORES
593
      else:
594
        ecode = errors.ECODE_NORES
595

    
596
      raise errors.OpPrereqError("Can't compute nodes using"
597
                                 " iallocator '%s': %s" %
598
                                 (self.op.iallocator, ial.info),
599
                                 ecode)
600

    
601
    self.op.pnode = ial.result[0]
602
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
603
                 self.op.instance_name, self.op.iallocator,
604
                 utils.CommaJoin(ial.result))
605

    
606
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
607

    
608
    if req.RequiredNodes() == 2:
609
      self.op.snode = ial.result[1]
610

    
611
  def BuildHooksEnv(self):
612
    """Build hooks env.
613

614
    This runs on master, primary and secondary nodes of the instance.
615

616
    """
617
    env = {
618
      "ADD_MODE": self.op.mode,
619
      }
620
    if self.op.mode == constants.INSTANCE_IMPORT:
621
      env["SRC_NODE"] = self.op.src_node
622
      env["SRC_PATH"] = self.op.src_path
623
      env["SRC_IMAGES"] = self.src_images
624

    
625
    env.update(BuildInstanceHookEnv(
626
      name=self.op.instance_name,
627
      primary_node=self.op.pnode,
628
      secondary_nodes=self.secondaries,
629
      status=self.op.start,
630
      os_type=self.op.os_type,
631
      minmem=self.be_full[constants.BE_MINMEM],
632
      maxmem=self.be_full[constants.BE_MAXMEM],
633
      vcpus=self.be_full[constants.BE_VCPUS],
634
      nics=NICListToTuple(self, self.nics),
635
      disk_template=self.op.disk_template,
636
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
637
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE], {})
638
             for d in self.disks],
639
      bep=self.be_full,
640
      hvp=self.hv_full,
641
      hypervisor_name=self.op.hypervisor,
642
      tags=self.op.tags,
643
      ))
644

    
645
    return env
646

    
647
  def BuildHooksNodes(self):
648
    """Build hooks nodes.
649

650
    """
651
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
652
    return nl, nl
653

    
654
  def _ReadExportInfo(self):
655
    """Reads the export information from disk.
656

657
    It will override the opcode source node and path with the actual
658
    information, if these two were not specified before.
659

660
    @return: the export information
661

662
    """
663
    assert self.op.mode == constants.INSTANCE_IMPORT
664

    
665
    src_node = self.op.src_node
666
    src_path = self.op.src_path
667

    
668
    if src_node is None:
669
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
670
      exp_list = self.rpc.call_export_list(locked_nodes)
671
      found = False
672
      for node in exp_list:
673
        if exp_list[node].fail_msg:
674
          continue
675
        if src_path in exp_list[node].payload:
676
          found = True
677
          self.op.src_node = src_node = node
678
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
679
                                                       src_path)
680
          break
681
      if not found:
682
        raise errors.OpPrereqError("No export found for relative path %s" %
683
                                   src_path, errors.ECODE_INVAL)
684

    
685
    CheckNodeOnline(self, src_node)
686
    result = self.rpc.call_export_info(src_node, src_path)
687
    result.Raise("No export or invalid export found in dir %s" % src_path)
688

    
689
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
690
    if not export_info.has_section(constants.INISECT_EXP):
691
      raise errors.ProgrammerError("Corrupted export config",
692
                                   errors.ECODE_ENVIRON)
693

    
694
    ei_version = export_info.get(constants.INISECT_EXP, "version")
695
    if (int(ei_version) != constants.EXPORT_VERSION):
696
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
697
                                 (ei_version, constants.EXPORT_VERSION),
698
                                 errors.ECODE_ENVIRON)
699
    return export_info
700

    
701
  def _ReadExportParams(self, einfo):
702
    """Use export parameters as defaults.
703

704
    In case the opcode doesn't specify (as in override) some instance
705
    parameters, then try to use them from the export information, if
706
    that declares them.
707

708
    """
709
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
710

    
711
    if self.op.disk_template is None:
712
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
713
        self.op.disk_template = einfo.get(constants.INISECT_INS,
714
                                          "disk_template")
715
        if self.op.disk_template not in constants.DISK_TEMPLATES:
716
          raise errors.OpPrereqError("Disk template specified in configuration"
717
                                     " file is not one of the allowed values:"
718
                                     " %s" %
719
                                     " ".join(constants.DISK_TEMPLATES),
720
                                     errors.ECODE_INVAL)
721
      else:
722
        raise errors.OpPrereqError("No disk template specified and the export"
723
                                   " is missing the disk_template information",
724
                                   errors.ECODE_INVAL)
725

    
726
    if not self.op.disks:
727
      disks = []
728
      # TODO: import the disk iv_name too
729
      for idx in range(constants.MAX_DISKS):
730
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
731
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
732
          disks.append({constants.IDISK_SIZE: disk_sz})
733
      self.op.disks = disks
734
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
735
        raise errors.OpPrereqError("No disk info specified and the export"
736
                                   " is missing the disk information",
737
                                   errors.ECODE_INVAL)
738

    
739
    if not self.op.nics:
740
      nics = []
741
      for idx in range(constants.MAX_NICS):
742
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
743
          ndict = {}
744
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
745
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
746
            ndict[name] = v
747
          nics.append(ndict)
748
        else:
749
          break
750
      self.op.nics = nics
751

    
752
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
753
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
754

    
755
    if (self.op.hypervisor is None and
756
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
757
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
758

    
759
    if einfo.has_section(constants.INISECT_HYP):
760
      # use the export parameters but do not override the ones
761
      # specified by the user
762
      for name, value in einfo.items(constants.INISECT_HYP):
763
        if name not in self.op.hvparams:
764
          self.op.hvparams[name] = value
765

    
766
    if einfo.has_section(constants.INISECT_BEP):
767
      # use the parameters, without overriding
768
      for name, value in einfo.items(constants.INISECT_BEP):
769
        if name not in self.op.beparams:
770
          self.op.beparams[name] = value
771
        # Compatibility for the old "memory" be param
772
        if name == constants.BE_MEMORY:
773
          if constants.BE_MAXMEM not in self.op.beparams:
774
            self.op.beparams[constants.BE_MAXMEM] = value
775
          if constants.BE_MINMEM not in self.op.beparams:
776
            self.op.beparams[constants.BE_MINMEM] = value
777
    else:
778
      # try to read the parameters old style, from the main section
779
      for name in constants.BES_PARAMETERS:
780
        if (name not in self.op.beparams and
781
            einfo.has_option(constants.INISECT_INS, name)):
782
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
783

    
784
    if einfo.has_section(constants.INISECT_OSP):
785
      # use the parameters, without overriding
786
      for name, value in einfo.items(constants.INISECT_OSP):
787
        if name not in self.op.osparams:
788
          self.op.osparams[name] = value
789

    
790
  def _RevertToDefaults(self, cluster):
791
    """Revert the instance parameters to the default values.
792

793
    """
794
    # hvparams
795
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
796
    for name in self.op.hvparams.keys():
797
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
798
        del self.op.hvparams[name]
799
    # beparams
800
    be_defs = cluster.SimpleFillBE({})
801
    for name in self.op.beparams.keys():
802
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
803
        del self.op.beparams[name]
804
    # nic params
805
    nic_defs = cluster.SimpleFillNIC({})
806
    for nic in self.op.nics:
807
      for name in constants.NICS_PARAMETERS:
808
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
809
          del nic[name]
810
    # osparams
811
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
812
    for name in self.op.osparams.keys():
813
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
814
        del self.op.osparams[name]
815

    
816
  def _CalculateFileStorageDir(self):
817
    """Calculate final instance file storage dir.
818

819
    """
820
    # file storage dir calculation/check
821
    self.instance_file_storage_dir = None
822
    if self.op.disk_template in constants.DTS_FILEBASED:
823
      # build the full file storage dir path
824
      joinargs = []
825

    
826
      if self.op.disk_template == constants.DT_SHARED_FILE:
827
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
828
      else:
829
        get_fsd_fn = self.cfg.GetFileStorageDir
830

    
831
      cfg_storagedir = get_fsd_fn()
832
      if not cfg_storagedir:
833
        raise errors.OpPrereqError("Cluster file storage dir not defined",
834
                                   errors.ECODE_STATE)
835
      joinargs.append(cfg_storagedir)
836

    
837
      if self.op.file_storage_dir is not None:
838
        joinargs.append(self.op.file_storage_dir)
839

    
840
      joinargs.append(self.op.instance_name)
841

    
842
      # pylint: disable=W0142
843
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
844

    
845
  def CheckPrereq(self): # pylint: disable=R0914
846
    """Check prerequisites.
847

848
    """
849
    self._CalculateFileStorageDir()
850

    
851
    if self.op.mode == constants.INSTANCE_IMPORT:
852
      export_info = self._ReadExportInfo()
853
      self._ReadExportParams(export_info)
854
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
855
    else:
856
      self._old_instance_name = None
857

    
858
    if (not self.cfg.GetVGName() and
859
        self.op.disk_template not in constants.DTS_NOT_LVM):
860
      raise errors.OpPrereqError("Cluster does not support lvm-based"
861
                                 " instances", errors.ECODE_STATE)
862

    
863
    if (self.op.hypervisor is None or
864
        self.op.hypervisor == constants.VALUE_AUTO):
865
      self.op.hypervisor = self.cfg.GetHypervisorType()
866

    
867
    cluster = self.cfg.GetClusterInfo()
868
    enabled_hvs = cluster.enabled_hypervisors
869
    if self.op.hypervisor not in enabled_hvs:
870
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
871
                                 " cluster (%s)" %
872
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
873
                                 errors.ECODE_STATE)
874

    
875
    # Check tag validity
876
    for tag in self.op.tags:
877
      objects.TaggableObject.ValidateTag(tag)
878

    
879
    # check hypervisor parameter syntax (locally)
880
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
881
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
882
                                      self.op.hvparams)
883
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
884
    hv_type.CheckParameterSyntax(filled_hvp)
885
    self.hv_full = filled_hvp
886
    # check that we don't specify global parameters on an instance
887
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
888
                         "instance", "cluster")
889

    
890
    # fill and remember the beparams dict
891
    self.be_full = _ComputeFullBeParams(self.op, cluster)
892

    
893
    # build os parameters
894
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
895

    
896
    # now that hvp/bep are in final format, let's reset to defaults,
897
    # if told to do so
898
    if self.op.identify_defaults:
899
      self._RevertToDefaults(cluster)
900

    
901
    # NIC buildup
902
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
903
                             self.proc.GetECId())
904

    
905
    # disk checks/pre-build
906
    default_vg = self.cfg.GetVGName()
907
    self.disks = ComputeDisks(self.op, default_vg)
908

    
909
    if self.op.mode == constants.INSTANCE_IMPORT:
910
      disk_images = []
911
      for idx in range(len(self.disks)):
912
        option = "disk%d_dump" % idx
913
        if export_info.has_option(constants.INISECT_INS, option):
914
          # FIXME: are the old os-es, disk sizes, etc. useful?
915
          export_name = export_info.get(constants.INISECT_INS, option)
916
          image = utils.PathJoin(self.op.src_path, export_name)
917
          disk_images.append(image)
918
        else:
919
          disk_images.append(False)
920

    
921
      self.src_images = disk_images
922

    
923
      if self.op.instance_name == self._old_instance_name:
924
        for idx, nic in enumerate(self.nics):
925
          if nic.mac == constants.VALUE_AUTO:
926
            nic_mac_ini = "nic%d_mac" % idx
927
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
928

    
929
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
930

    
931
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
932
    if self.op.ip_check:
933
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
934
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
935
                                   (self.check_ip, self.op.instance_name),
936
                                   errors.ECODE_NOTUNIQUE)
937

    
938
    #### mac address generation
939
    # By generating here the mac address both the allocator and the hooks get
940
    # the real final mac address rather than the 'auto' or 'generate' value.
941
    # There is a race condition between the generation and the instance object
942
    # creation, which means that we know the mac is valid now, but we're not
943
    # sure it will be when we actually add the instance. If things go bad
944
    # adding the instance will abort because of a duplicate mac, and the
945
    # creation job will fail.
946
    for nic in self.nics:
947
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
948
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
949

    
950
    #### allocator run
951

    
952
    if self.op.iallocator is not None:
953
      self._RunAllocator()
954

    
955
    # Release all unneeded node locks
956
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
957
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
958
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
959
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
960

    
961
    assert (self.owned_locks(locking.LEVEL_NODE) ==
962
            self.owned_locks(locking.LEVEL_NODE_RES)), \
963
      "Node locks differ from node resource locks"
964

    
965
    #### node related checks
966

    
967
    # check primary node
968
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
969
    assert self.pnode is not None, \
970
      "Cannot retrieve locked node %s" % self.op.pnode
971
    if pnode.offline:
972
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
973
                                 pnode.name, errors.ECODE_STATE)
974
    if pnode.drained:
975
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
976
                                 pnode.name, errors.ECODE_STATE)
977
    if not pnode.vm_capable:
978
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
979
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
980

    
981
    self.secondaries = []
982

    
983
    # Fill in any IPs from IP pools. This must happen here, because we need to
984
    # know the nic's primary node, as specified by the iallocator
985
    for idx, nic in enumerate(self.nics):
986
      net_uuid = nic.network
987
      if net_uuid is not None:
988
        nobj = self.cfg.GetNetwork(net_uuid)
989
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
990
        if netparams is None:
991
          raise errors.OpPrereqError("No netparams found for network"
992
                                     " %s. Propably not connected to"
993
                                     " node's %s nodegroup" %
994
                                     (nobj.name, self.pnode.name),
995
                                     errors.ECODE_INVAL)
996
        self.LogInfo("NIC/%d inherits netparams %s" %
997
                     (idx, netparams.values()))
998
        nic.nicparams = dict(netparams)
999
        if nic.ip is not None:
1000
          if nic.ip.lower() == constants.NIC_IP_POOL:
1001
            try:
1002
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1003
            except errors.ReservationError:
1004
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1005
                                         " from the address pool" % idx,
1006
                                         errors.ECODE_STATE)
1007
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1008
          else:
1009
            try:
1010
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
1011
                                 check=self.op.conflicts_check)
1012
            except errors.ReservationError:
1013
              raise errors.OpPrereqError("IP address %s already in use"
1014
                                         " or does not belong to network %s" %
1015
                                         (nic.ip, nobj.name),
1016
                                         errors.ECODE_NOTUNIQUE)
1017

    
1018
      # net is None, ip None or given
1019
      elif self.op.conflicts_check:
1020
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1021

    
1022
    # mirror node verification
1023
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1024
      if self.op.snode == pnode.name:
1025
        raise errors.OpPrereqError("The secondary node cannot be the"
1026
                                   " primary node", errors.ECODE_INVAL)
1027
      CheckNodeOnline(self, self.op.snode)
1028
      CheckNodeNotDrained(self, self.op.snode)
1029
      CheckNodeVmCapable(self, self.op.snode)
1030
      self.secondaries.append(self.op.snode)
1031

    
1032
      snode = self.cfg.GetNodeInfo(self.op.snode)
1033
      if pnode.group != snode.group:
1034
        self.LogWarning("The primary and secondary nodes are in two"
1035
                        " different node groups; the disk parameters"
1036
                        " from the first disk's node group will be"
1037
                        " used")
1038

    
1039
    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1040
      nodes = [pnode]
1041
      if self.op.disk_template in constants.DTS_INT_MIRROR:
1042
        nodes.append(snode)
1043
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1044
      if compat.any(map(has_es, nodes)):
1045
        raise errors.OpPrereqError("Disk template %s not supported with"
1046
                                   " exclusive storage" % self.op.disk_template,
1047
                                   errors.ECODE_STATE)
1048

    
1049
    nodenames = [pnode.name] + self.secondaries
1050

    
1051
    if not self.adopt_disks:
1052
      if self.op.disk_template == constants.DT_RBD:
1053
        # _CheckRADOSFreeSpace() is just a placeholder.
1054
        # Any function that checks prerequisites can be placed here.
1055
        # Check if there is enough space on the RADOS cluster.
1056
        CheckRADOSFreeSpace()
1057
      elif self.op.disk_template == constants.DT_EXT:
1058
        # FIXME: Function that checks prereqs if needed
1059
        pass
1060
      else:
1061
        # Check lv size requirements, if not adopting
1062
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1063
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1064

    
1065
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1066
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1067
                                disk[constants.IDISK_ADOPT])
1068
                     for disk in self.disks])
1069
      if len(all_lvs) != len(self.disks):
1070
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1071
                                   errors.ECODE_INVAL)
1072
      for lv_name in all_lvs:
1073
        try:
1074
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1075
          # to ReserveLV uses the same syntax
1076
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1077
        except errors.ReservationError:
1078
          raise errors.OpPrereqError("LV named %s used by another instance" %
1079
                                     lv_name, errors.ECODE_NOTUNIQUE)
1080

    
1081
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1082
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1083

    
1084
      node_lvs = self.rpc.call_lv_list([pnode.name],
1085
                                       vg_names.payload.keys())[pnode.name]
1086
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1087
      node_lvs = node_lvs.payload
1088

    
1089
      delta = all_lvs.difference(node_lvs.keys())
1090
      if delta:
1091
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1092
                                   utils.CommaJoin(delta),
1093
                                   errors.ECODE_INVAL)
1094
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1095
      if online_lvs:
1096
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1097
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1098
                                   errors.ECODE_STATE)
1099
      # update the size of disk based on what is found
1100
      for dsk in self.disks:
1101
        dsk[constants.IDISK_SIZE] = \
1102
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1103
                                        dsk[constants.IDISK_ADOPT])][0]))
1104

    
1105
    elif self.op.disk_template == constants.DT_BLOCK:
1106
      # Normalize and de-duplicate device paths
1107
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1108
                       for disk in self.disks])
1109
      if len(all_disks) != len(self.disks):
1110
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1111
                                   errors.ECODE_INVAL)
1112
      baddisks = [d for d in all_disks
1113
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1114
      if baddisks:
1115
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1116
                                   " cannot be adopted" %
1117
                                   (utils.CommaJoin(baddisks),
1118
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1119
                                   errors.ECODE_INVAL)
1120

    
1121
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
1122
                                            list(all_disks))[pnode.name]
1123
      node_disks.Raise("Cannot get block device information from node %s" %
1124
                       pnode.name)
1125
      node_disks = node_disks.payload
1126
      delta = all_disks.difference(node_disks.keys())
1127
      if delta:
1128
        raise errors.OpPrereqError("Missing block device(s): %s" %
1129
                                   utils.CommaJoin(delta),
1130
                                   errors.ECODE_INVAL)
1131
      for dsk in self.disks:
1132
        dsk[constants.IDISK_SIZE] = \
1133
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1134

    
1135
    # Verify instance specs
1136
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1137
    ispec = {
1138
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1139
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1140
      constants.ISPEC_DISK_COUNT: len(self.disks),
1141
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1142
                                  for disk in self.disks],
1143
      constants.ISPEC_NIC_COUNT: len(self.nics),
1144
      constants.ISPEC_SPINDLE_USE: spindle_use,
1145
      }
1146

    
1147
    group_info = self.cfg.GetNodeGroup(pnode.group)
1148
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1149
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1150
                                               self.op.disk_template)
1151
    if not self.op.ignore_ipolicy and res:
1152
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1153
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1154
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1155

    
1156
    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1157

    
1158
    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1159
    # check OS parameters (remotely)
1160
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1161

    
1162
    CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1163

    
1164
    #TODO: _CheckExtParams (remotely)
1165
    # Check parameters for extstorage
1166

    
1167
    # memory check on primary node
1168
    #TODO(dynmem): use MINMEM for checking
1169
    if self.op.start:
1170
      CheckNodeFreeMemory(self, self.pnode.name,
1171
                          "creating instance %s" % self.op.instance_name,
1172
                          self.be_full[constants.BE_MAXMEM],
1173
                          self.op.hypervisor)
1174

    
1175
    self.dry_run_result = list(nodenames)
1176

    
1177
  def Exec(self, feedback_fn):
1178
    """Create and add the instance to the cluster.
1179

1180
    """
1181
    instance = self.op.instance_name
1182
    pnode_name = self.pnode.name
1183

    
1184
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1185
                self.owned_locks(locking.LEVEL_NODE)), \
1186
      "Node locks differ from node resource locks"
1187
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1188

    
1189
    ht_kind = self.op.hypervisor
1190
    if ht_kind in constants.HTS_REQ_PORT:
1191
      network_port = self.cfg.AllocatePort()
1192
    else:
1193
      network_port = None
1194

    
1195
    # This is ugly but we got a chicken-egg problem here
1196
    # We can only take the group disk parameters, as the instance
1197
    # has no disks yet (we are generating them right here).
1198
    node = self.cfg.GetNodeInfo(pnode_name)
1199
    nodegroup = self.cfg.GetNodeGroup(node.group)
1200
    disks = GenerateDiskTemplate(self,
1201
                                 self.op.disk_template,
1202
                                 instance, pnode_name,
1203
                                 self.secondaries,
1204
                                 self.disks,
1205
                                 self.instance_file_storage_dir,
1206
                                 self.op.file_driver,
1207
                                 0,
1208
                                 feedback_fn,
1209
                                 self.cfg.GetGroupDiskParams(nodegroup))
1210

    
1211
    iobj = objects.Instance(name=instance, os=self.op.os_type,
1212
                            primary_node=pnode_name,
1213
                            nics=self.nics, disks=disks,
1214
                            disk_template=self.op.disk_template,
1215
                            disks_active=False,
1216
                            admin_state=constants.ADMINST_DOWN,
1217
                            network_port=network_port,
1218
                            beparams=self.op.beparams,
1219
                            hvparams=self.op.hvparams,
1220
                            hypervisor=self.op.hypervisor,
1221
                            osparams=self.op.osparams,
1222
                            )
1223

    
1224
    if self.op.tags:
1225
      for tag in self.op.tags:
1226
        iobj.AddTag(tag)
1227

    
1228
    if self.adopt_disks:
1229
      if self.op.disk_template == constants.DT_PLAIN:
1230
        # rename LVs to the newly-generated names; we need to construct
1231
        # 'fake' LV disks with the old data, plus the new unique_id
1232
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1233
        rename_to = []
1234
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1235
          rename_to.append(t_dsk.logical_id)
1236
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1237
          self.cfg.SetDiskID(t_dsk, pnode_name)
1238
        result = self.rpc.call_blockdev_rename(pnode_name,
1239
                                               zip(tmp_disks, rename_to))
1240
        result.Raise("Failed to rename adoped LVs")
1241
    else:
1242
      feedback_fn("* creating instance disks...")
1243
      try:
1244
        CreateDisks(self, iobj)
1245
      except errors.OpExecError:
1246
        self.LogWarning("Device creation failed")
1247
        self.cfg.ReleaseDRBDMinors(instance)
1248
        raise
1249

    
1250
    feedback_fn("adding instance %s to cluster config" % instance)
1251

    
1252
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1253

    
1254
    # Declare that we don't want to remove the instance lock anymore, as we've
1255
    # added the instance to the config
1256
    del self.remove_locks[locking.LEVEL_INSTANCE]
1257

    
1258
    if self.op.mode == constants.INSTANCE_IMPORT:
1259
      # Release unused nodes
1260
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1261
    else:
1262
      # Release all nodes
1263
      ReleaseLocks(self, locking.LEVEL_NODE)
1264

    
1265
    disk_abort = False
1266
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1267
      feedback_fn("* wiping instance disks...")
1268
      try:
1269
        WipeDisks(self, iobj)
1270
      except errors.OpExecError, err:
1271
        logging.exception("Wiping disks failed")
1272
        self.LogWarning("Wiping instance disks failed (%s)", err)
1273
        disk_abort = True
1274

    
1275
    if disk_abort:
1276
      # Something is already wrong with the disks, don't do anything else
1277
      pass
1278
    elif self.op.wait_for_sync:
1279
      disk_abort = not WaitForSync(self, iobj)
1280
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1281
      # make sure the disks are not degraded (still sync-ing is ok)
1282
      feedback_fn("* checking mirrors status")
1283
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1284
    else:
1285
      disk_abort = False
1286

    
1287
    if disk_abort:
1288
      RemoveDisks(self, iobj)
1289
      self.cfg.RemoveInstance(iobj.name)
1290
      # Make sure the instance lock gets removed
1291
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1292
      raise errors.OpExecError("There are some degraded disks for"
1293
                               " this instance")
1294

    
1295
    # instance disks are now active
1296
    iobj.disks_active = True
1297

    
1298
    # Release all node resource locks
1299
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1300

    
1301
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1302
      # we need to set the disks ID to the primary node, since the
1303
      # preceding code might or might not have done it, depending on
1304
      # disk template and other options
1305
      for disk in iobj.disks:
1306
        self.cfg.SetDiskID(disk, pnode_name)
1307
      if self.op.mode == constants.INSTANCE_CREATE:
1308
        if not self.op.no_install:
1309
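          # For mirrored templates, pause the DRBD sync while the OS is being
          # installed so the initial resync does not compete with the install
          # I/O; it is resumed again right after the OS scripts have run.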
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1310
                        not self.op.wait_for_sync)
1311
          if pause_sync:
1312
            feedback_fn("* pausing disk sync to install instance OS")
1313
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1314
                                                              (iobj.disks,
1315
                                                               iobj), True)
1316
            for idx, success in enumerate(result.payload):
1317
              if not success:
1318
                logging.warn("pause-sync of instance %s for disk %d failed",
1319
                             instance, idx)
1320

    
1321
          feedback_fn("* running the instance OS create scripts...")
1322
          # FIXME: pass debug option from opcode to backend
1323
          os_add_result = \
1324
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1325
                                          self.op.debug_level)
1326
          if pause_sync:
1327
            feedback_fn("* resuming disk sync")
1328
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1329
                                                              (iobj.disks,
1330
                                                               iobj), False)
1331
            for idx, success in enumerate(result.payload):
1332
              if not success:
1333
                logging.warn("resume-sync of instance %s for disk %d failed",
1334
                             instance, idx)
1335

    
1336
          os_add_result.Raise("Could not add os for instance %s"
1337
                              " on node %s" % (instance, pnode_name))
1338

    
1339
      else:
1340
        if self.op.mode == constants.INSTANCE_IMPORT:
1341
          feedback_fn("* running the instance OS import scripts...")
1342

    
1343
          transfers = []
1344

    
1345
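          # One DiskTransfer per non-empty source image: the exported file on
          # the source node is streamed into the matching new disk through the
          # OS import script on the primary node.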
          for idx, image in enumerate(self.src_images):
1346
            if not image:
1347
              continue
1348

    
1349
            # FIXME: pass debug option from opcode to backend
1350
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1351
                                               constants.IEIO_FILE, (image, ),
1352
                                               constants.IEIO_SCRIPT,
1353
                                               (iobj.disks[idx], idx),
1354
                                               None)
1355
            transfers.append(dt)
1356

    
1357
          import_result = \
1358
            masterd.instance.TransferInstanceData(self, feedback_fn,
1359
                                                  self.op.src_node, pnode_name,
1360
                                                  self.pnode.secondary_ip,
1361
                                                  iobj, transfers)
1362
          if not compat.all(import_result):
1363
            self.LogWarning("Some disks for instance %s on node %s were not"
1364
                            " imported successfully" % (instance, pnode_name))
1365

    
1366
          rename_from = self._old_instance_name
1367

    
1368
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1369
          feedback_fn("* preparing remote import...")
1370
          # The source cluster will stop the instance before attempting to make
1371
          # a connection. In some cases stopping an instance can take a long
1372
          # time, hence the shutdown timeout is added to the connection
1373
          # timeout.
1374
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1375
                             self.op.source_shutdown_timeout)
1376
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1377

    
1378
          assert iobj.primary_node == self.pnode.name
1379
          disk_results = \
1380
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1381
                                          self.source_x509_ca,
1382
                                          self._cds, timeouts)
1383
          if not compat.all(disk_results):
1384
            # TODO: Should the instance still be started, even if some disks
1385
            # failed to import (valid for local imports, too)?
1386
            self.LogWarning("Some disks for instance %s on node %s were not"
1387
                            " imported successfully" % (instance, pnode_name))
1388

    
1389
          rename_from = self.source_instance_name
1390

    
1391
        else:
1392
          # also checked in the prereq part
1393
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1394
                                       % self.op.mode)
1395

    
1396
        # Run rename script on newly imported instance
1397
        assert iobj.name == instance
1398
        feedback_fn("Running rename script for %s" % instance)
1399
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1400
                                                   rename_from,
1401
                                                   self.op.debug_level)
1402
        if result.fail_msg:
1403
          self.LogWarning("Failed to run rename script for %s on node"
1404
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
1405

    
1406
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1407

    
1408
    if self.op.start:
1409
      iobj.admin_state = constants.ADMINST_UP
1410
      self.cfg.Update(iobj, feedback_fn)
1411
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1412
      feedback_fn("* starting instance...")
1413
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1414
                                            False, self.op.reason)
1415
      result.Raise("Could not start instance")
1416

    
1417
    return list(iobj.all_nodes)
1418

    
1419

    
1420
class LUInstanceRename(LogicalUnit):
1421
  """Rename an instance.
1422

1423
  """
1424
  HPATH = "instance-rename"
1425
  HTYPE = constants.HTYPE_INSTANCE
1426

    
1427
  def CheckArguments(self):
1428
    """Check arguments.
1429

1430
    """
1431
    if self.op.ip_check and not self.op.name_check:
1432
      # TODO: make the ip check more flexible and not depend on the name check
1433
      raise errors.OpPrereqError("IP address check requires a name check",
1434
                                 errors.ECODE_INVAL)
1435

    
1436
  def BuildHooksEnv(self):
1437
    """Build hooks env.
1438

1439
    This runs on master, primary and secondary nodes of the instance.
1440

1441
    """
1442
    env = BuildInstanceHookEnvByObject(self, self.instance)
1443
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1444
    return env
1445

    
1446
  def BuildHooksNodes(self):
1447
    """Build hooks nodes.
1448

1449
    """
1450
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1451
    return (nl, nl)
1452

    
1453
  def CheckPrereq(self):
1454
    """Check prerequisites.
1455

1456
    This checks that the instance is in the cluster and is not running.
1457

1458
    """
1459
    self.op.instance_name = ExpandInstanceName(self.cfg,
1460
                                               self.op.instance_name)
1461
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1462
    assert instance is not None
1463
    CheckNodeOnline(self, instance.primary_node)
1464
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1465
                       msg="cannot rename")
1466
    self.instance = instance
1467

    
1468
    new_name = self.op.new_name
1469
    if self.op.name_check:
1470
      hostname = _CheckHostnameSane(self, new_name)
1471
      new_name = self.op.new_name = hostname.name
1472
      if (self.op.ip_check and
1473
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1474
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1475
                                   (hostname.ip, new_name),
1476
                                   errors.ECODE_NOTUNIQUE)
1477

    
1478
    instance_list = self.cfg.GetInstanceList()
1479
    if new_name in instance_list and new_name != instance.name:
1480
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1481
                                 new_name, errors.ECODE_EXISTS)
1482

    
1483
  def Exec(self, feedback_fn):
1484
    """Rename the instance.
1485

1486
    """
1487
    inst = self.instance
1488
    old_name = inst.name
1489

    
1490
    rename_file_storage = False
1491
    if (inst.disk_template in constants.DTS_FILEBASED and
1492
        self.op.new_name != inst.name):
1493
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1494
      rename_file_storage = True
1495

    
1496
    self.cfg.RenameInstance(inst.name, self.op.new_name)
1497
    # Change the instance lock. This is definitely safe while we hold the BGL.
1498
    # Otherwise the new lock would have to be added in acquired mode.
1499
    assert self.REQ_BGL
1500
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1501
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1502
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1503

    
1504
    # re-read the instance from the configuration after rename
1505
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
1506

    
1507
    if rename_file_storage:
1508
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1509
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1510
                                                     old_file_storage_dir,
1511
                                                     new_file_storage_dir)
1512
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1513
                   " (but the instance has been renamed in Ganeti)" %
1514
                   (inst.primary_node, old_file_storage_dir,
1515
                    new_file_storage_dir))
1516

    
1517
    StartInstanceDisks(self, inst, None)
1518
    # update info on disks
1519
    info = GetInstanceInfoText(inst)
1520
    for (idx, disk) in enumerate(inst.disks):
1521
      for node in inst.all_nodes:
1522
        self.cfg.SetDiskID(disk, node)
1523
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
1524
        if result.fail_msg:
1525
          self.LogWarning("Error setting info on node %s for disk %s: %s",
1526
                          node, idx, result.fail_msg)
1527
    try:
1528
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1529
                                                 old_name, self.op.debug_level)
1530
      msg = result.fail_msg
1531
      if msg:
1532
        msg = ("Could not run OS rename script for instance %s on node %s"
1533
               " (but the instance has been renamed in Ganeti): %s" %
1534
               (inst.name, inst.primary_node, msg))
1535
        self.LogWarning(msg)
1536
    finally:
1537
      ShutdownInstanceDisks(self, inst)
1538

    
1539
    return inst.name
1540

    
1541

    
1542
class LUInstanceRemove(LogicalUnit):
1543
  """Remove an instance.
1544

1545
  """
1546
  HPATH = "instance-remove"
1547
  HTYPE = constants.HTYPE_INSTANCE
1548
  REQ_BGL = False
1549

    
1550
  def ExpandNames(self):
1551
    self._ExpandAndLockInstance()
1552
    self.needed_locks[locking.LEVEL_NODE] = []
1553
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1554
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1555

    
1556
  def DeclareLocks(self, level):
1557
    if level == locking.LEVEL_NODE:
1558
      self._LockInstancesNodes()
1559
    elif level == locking.LEVEL_NODE_RES:
1560
      # Copy node locks
1561
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1562
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1563

    
1564
  def BuildHooksEnv(self):
1565
    """Build hooks env.
1566

1567
    This runs on master, primary and secondary nodes of the instance.
1568

1569
    """
1570
    env = BuildInstanceHookEnvByObject(self, self.instance)
1571
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1572
    return env
1573

    
1574
  def BuildHooksNodes(self):
1575
    """Build hooks nodes.
1576

1577
    """
1578
    nl = [self.cfg.GetMasterNode()]
1579
    nl_post = list(self.instance.all_nodes) + nl
1580
    return (nl, nl_post)
1581

    
1582
  def CheckPrereq(self):
1583
    """Check prerequisites.
1584

1585
    This checks that the instance is in the cluster.
1586

1587
    """
1588
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1589
    assert self.instance is not None, \
1590
      "Cannot retrieve locked instance %s" % self.op.instance_name
1591

    
1592
  def Exec(self, feedback_fn):
1593
    """Remove the instance.
1594

1595
    """
1596
    instance = self.instance
1597
    logging.info("Shutting down instance %s on node %s",
1598
                 instance.name, instance.primary_node)
1599

    
1600
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1601
                                             self.op.shutdown_timeout,
1602
                                             self.op.reason)
1603
    msg = result.fail_msg
1604
    if msg:
1605
      if self.op.ignore_failures:
1606
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
1607
      else:
1608
        raise errors.OpExecError("Could not shutdown instance %s on"
1609
                                 " node %s: %s" %
1610
                                 (instance.name, instance.primary_node, msg))
1611

    
1612
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1613
            self.owned_locks(locking.LEVEL_NODE_RES))
1614
    assert not (set(instance.all_nodes) -
1615
                self.owned_locks(locking.LEVEL_NODE)), \
1616
      "Not owning correct locks"
1617

    
1618
    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1619

    
1620

    
1621
class LUInstanceMove(LogicalUnit):
1622
  """Move an instance by data-copying.
1623

1624
  """
1625
  HPATH = "instance-move"
1626
  HTYPE = constants.HTYPE_INSTANCE
1627
  REQ_BGL = False
1628

    
1629
  def ExpandNames(self):
1630
    self._ExpandAndLockInstance()
1631
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
1632
    self.op.target_node = target_node
1633
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
1634
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1635
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1636

    
1637
  def DeclareLocks(self, level):
1638
    if level == locking.LEVEL_NODE:
1639
      self._LockInstancesNodes(primary_only=True)
1640
    elif level == locking.LEVEL_NODE_RES:
1641
      # Copy node locks
1642
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1643
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1644

    
1645
  def BuildHooksEnv(self):
1646
    """Build hooks env.
1647

1648
    This runs on master, primary and secondary nodes of the instance.
1649

1650
    """
1651
    env = {
1652
      "TARGET_NODE": self.op.target_node,
1653
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1654
      }
1655
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1656
    return env
1657

    
1658
  def BuildHooksNodes(self):
1659
    """Build hooks nodes.
1660

1661
    """
1662
    nl = [
1663
      self.cfg.GetMasterNode(),
1664
      self.instance.primary_node,
1665
      self.op.target_node,
1666
      ]
1667
    return (nl, nl)
1668

    
1669
  def CheckPrereq(self):
1670
    """Check prerequisites.
1671

1672
    This checks that the instance is in the cluster.
1673

1674
    """
1675
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1676
    assert self.instance is not None, \
1677
      "Cannot retrieve locked instance %s" % self.op.instance_name
1678

    
1679
    if instance.disk_template not in constants.DTS_COPYABLE:
1680
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1681
                                 instance.disk_template, errors.ECODE_STATE)
1682

    
1683
    node = self.cfg.GetNodeInfo(self.op.target_node)
1684
    assert node is not None, \
1685
      "Cannot retrieve locked node %s" % self.op.target_node
1686

    
1687
    self.target_node = target_node = node.name
1688

    
1689
    if target_node == instance.primary_node:
1690
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1691
                                 (instance.name, target_node),
1692
                                 errors.ECODE_STATE)
1693

    
1694
    bep = self.cfg.GetClusterInfo().FillBE(instance)
1695

    
1696
    for idx, dsk in enumerate(instance.disks):
1697
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1698
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1699
                                   " cannot copy" % idx, errors.ECODE_STATE)
1700

    
1701
    CheckNodeOnline(self, target_node)
1702
    CheckNodeNotDrained(self, target_node)
1703
    CheckNodeVmCapable(self, target_node)
1704
    cluster = self.cfg.GetClusterInfo()
1705
    group_info = self.cfg.GetNodeGroup(node.group)
1706
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1707
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1708
                           ignore=self.op.ignore_ipolicy)
1709

    
1710
    if instance.admin_state == constants.ADMINST_UP:
1711
      # check memory requirements on the target node
1712
      CheckNodeFreeMemory(self, target_node,
1713
                          "failing over instance %s" %
1714
                          instance.name, bep[constants.BE_MAXMEM],
1715
                          instance.hypervisor)
1716
    else:
1717
      self.LogInfo("Not checking memory on the secondary node as"
1718
                   " instance will not be started")
1719

    
1720
    # check bridge existence
1721
    CheckInstanceBridgesExist(self, instance, node=target_node)
1722

    
1723
  def Exec(self, feedback_fn):
1724
    """Move an instance.
1725

1726
    The move is done by shutting it down on its present node, copying
1727
    the data over (slow) and starting it on the new node.
1728

1729
    """
1730
    instance = self.instance
1731

    
1732
    source_node = instance.primary_node
1733
    target_node = self.target_node
1734

    
1735
    self.LogInfo("Shutting down instance %s on source node %s",
1736
                 instance.name, source_node)
1737

    
1738
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1739
            self.owned_locks(locking.LEVEL_NODE_RES))
1740

    
1741
    result = self.rpc.call_instance_shutdown(source_node, instance,
1742
                                             self.op.shutdown_timeout,
1743
                                             self.op.reason)
1744
    msg = result.fail_msg
1745
    if msg:
1746
      if self.op.ignore_consistency:
1747
        self.LogWarning("Could not shutdown instance %s on node %s."
1748
                        " Proceeding anyway. Please make sure node"
1749
                        " %s is down. Error details: %s",
1750
                        instance.name, source_node, source_node, msg)
1751
      else:
1752
        raise errors.OpExecError("Could not shutdown instance %s on"
1753
                                 " node %s: %s" %
1754
                                 (instance.name, source_node, msg))
1755

    
1756
    # create the target disks
1757
    try:
1758
      CreateDisks(self, instance, target_node=target_node)
1759
    except errors.OpExecError:
1760
      self.LogWarning("Device creation failed")
1761
      self.cfg.ReleaseDRBDMinors(instance.name)
1762
      raise
1763

    
1764
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1765

    
1766
    errs = []
1767
    # activate, get path, copy the data over
1768
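    # For every disk: assemble the freshly created (empty) disk on the target
    # node to obtain its device path, then have the source node export its
    # data into that path over the network.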
    for idx, disk in enumerate(instance.disks):
1769
      self.LogInfo("Copying data for disk %d", idx)
1770
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1771
                                               instance.name, True, idx)
1772
      if result.fail_msg:
1773
        self.LogWarning("Can't assemble newly created disk %d: %s",
1774
                        idx, result.fail_msg)
1775
        errs.append(result.fail_msg)
1776
        break
1777
      dev_path, _ = result.payload
1778
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1779
                                             target_node, dev_path,
1780
                                             cluster_name)
1781
      if result.fail_msg:
1782
        self.LogWarning("Can't copy data over for disk %d: %s",
1783
                        idx, result.fail_msg)
1784
        errs.append(result.fail_msg)
1785
        break
1786

    
1787
    if errs:
1788
      self.LogWarning("Some disks failed to copy, aborting")
1789
      try:
1790
        RemoveDisks(self, instance, target_node=target_node)
1791
      finally:
1792
        self.cfg.ReleaseDRBDMinors(instance.name)
1793
        raise errors.OpExecError("Errors during disk copy: %s" %
1794
                                 (",".join(errs),))
1795

    
1796
    instance.primary_node = target_node
1797
    self.cfg.Update(instance, feedback_fn)
1798

    
1799
    self.LogInfo("Removing the disks on the original node")
1800
    RemoveDisks(self, instance, target_node=source_node)
1801

    
1802
    # Only start the instance if it's marked as up
1803
    if instance.admin_state == constants.ADMINST_UP:
1804
      self.LogInfo("Starting instance %s on node %s",
1805
                   instance.name, target_node)
1806

    
1807
      disks_ok, _ = AssembleInstanceDisks(self, instance,
1808
                                          ignore_secondaries=True)
1809
      if not disks_ok:
1810
        ShutdownInstanceDisks(self, instance)
1811
        raise errors.OpExecError("Can't activate the instance's disks")
1812

    
1813
      result = self.rpc.call_instance_start(target_node,
1814
                                            (instance, None, None), False,
1815
                                            self.op.reason)
1816
      msg = result.fail_msg
1817
      if msg:
1818
        ShutdownInstanceDisks(self, instance)
1819
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1820
                                 (instance.name, target_node, msg))
1821

    
1822

    
1823
class LUInstanceMultiAlloc(NoHooksLU):
1824
  """Allocates multiple instances at the same time.
1825

1826
  """
1827
  REQ_BGL = False
1828

    
1829
  def CheckArguments(self):
1830
    """Check arguments.
1831

1832
    """
1833
    nodes = []
1834
    for inst in self.op.instances:
1835
      if inst.iallocator is not None:
1836
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1837
                                   " instance objects", errors.ECODE_INVAL)
1838
      nodes.append(bool(inst.pnode))
1839
      if inst.disk_template in constants.DTS_INT_MIRROR:
1840
        nodes.append(bool(inst.snode))
1841

    
1842
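    # All-or-nothing check: for a non-empty list, all(nodes) ^ any(nodes) is
    # only true when some instances specify nodes and others do not.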
    has_nodes = compat.any(nodes)
1843
    if compat.all(nodes) ^ has_nodes:
1844
      raise errors.OpPrereqError("There are instance objects providing"
1845
                                 " pnode/snode while others do not",
1846
                                 errors.ECODE_INVAL)
1847

    
1848
    if not has_nodes and self.op.iallocator is None:
1849
      default_iallocator = self.cfg.GetDefaultIAllocator()
1850
      if default_iallocator:
1851
        self.op.iallocator = default_iallocator
1852
      else:
1853
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1854
                                   " given and no cluster-wide default"
1855
                                   " iallocator found; please specify either"
1856
                                   " an iallocator or nodes on the instances"
1857
                                   " or set a cluster-wide default iallocator",
1858
                                   errors.ECODE_INVAL)
1859

    
1860
    _CheckOpportunisticLocking(self.op)
1861

    
1862
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1863
    if dups:
1864
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1865
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1866

    
1867
  def ExpandNames(self):
1868
    """Calculate the locks.
1869

1870
    """
1871
    self.share_locks = ShareAll()
1872
    self.needed_locks = {
1873
      # iallocator will select nodes and even if no iallocator is used,
1874
      # collisions with LUInstanceCreate should be avoided
1875
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1876
      }
1877

    
1878
    if self.op.iallocator:
1879
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1880
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1881

    
1882
      if self.op.opportunistic_locking:
1883
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1884
    else:
1885
      nodeslist = []
1886
      for inst in self.op.instances:
1887
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1888
        nodeslist.append(inst.pnode)
1889
        if inst.snode is not None:
1890
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
1891
          nodeslist.append(inst.snode)
1892

    
1893
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1894
      # Lock resources of instance's primary and secondary nodes (copy to
1895
      # prevent accidental modification)
1896
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1897

    
1898
  def DeclareLocks(self, level):
1899
    if level == locking.LEVEL_NODE_RES and \
1900
      self.opportunistic_locks[locking.LEVEL_NODE]:
1901
      # Even when using opportunistic locking, we require the same set of
1902
      # NODE_RES locks as we got NODE locks
1903
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1904
        self.owned_locks(locking.LEVEL_NODE)
1905

    
1906
  def CheckPrereq(self):
1907
    """Check prerequisite.
1908

1909
    """
1910
    if self.op.iallocator:
1911
      cluster = self.cfg.GetClusterInfo()
1912
      default_vg = self.cfg.GetVGName()
1913
      ec_id = self.proc.GetECId()
1914

    
1915
      if self.op.opportunistic_locking:
1916
        # Only consider nodes for which a lock is held
1917
        node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1918
      else:
1919
        node_whitelist = None
1920

    
1921
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1922
                                           _ComputeNics(op, cluster, None,
1923
                                                        self.cfg, ec_id),
1924
                                           _ComputeFullBeParams(op, cluster),
1925
                                           node_whitelist)
1926
               for op in self.op.instances]
1927

    
1928
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1929
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1930

    
1931
      ial.Run(self.op.iallocator)
1932

    
1933
      if not ial.success:
1934
        raise errors.OpPrereqError("Can't compute nodes using"
1935
                                   " iallocator '%s': %s" %
1936
                                   (self.op.iallocator, ial.info),
1937
                                   errors.ECODE_NORES)
1938

    
1939
      self.ia_result = ial.result
1940

    
1941
    if self.op.dry_run:
1942
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1943
        constants.JOB_IDS_KEY: [],
1944
        })
1945

    
1946
  def _ConstructPartialResult(self):
1947
    """Contructs the partial result.
1948

1949
    """
1950
    if self.op.iallocator:
1951
      (allocatable, failed_insts) = self.ia_result
1952
      allocatable_insts = map(compat.fst, allocatable)
1953
    else:
1954
      allocatable_insts = [op.instance_name for op in self.op.instances]
1955
      failed_insts = []
1956

    
1957
    return {
1958
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
1959
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
1960
      }
1961

    
1962
  def Exec(self, feedback_fn):
1963
    """Executes the opcode.
1964

1965
    """
1966
    jobs = []
1967
    if self.op.iallocator:
1968
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
1969
      (allocatable, failed) = self.ia_result
1970

    
1971
      for (name, nodes) in allocatable:
1972
        op = op2inst.pop(name)
1973

    
1974
        if len(nodes) > 1:
1975
          (op.pnode, op.snode) = nodes
1976
        else:
1977
          (op.pnode,) = nodes
1978

    
1979
        jobs.append([op])
1980

    
1981
      missing = set(op2inst.keys()) - set(failed)
1982
      assert not missing, \
1983
        "Iallocator did return incomplete result: %s" % \
1984
        utils.CommaJoin(missing)
1985
    else:
1986
      jobs.extend([op] for op in self.op.instances)
1987

    
1988
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
1989

    
1990

    
1991
class _InstNicModPrivate:
1992
  """Data structure for network interface modifications.
1993

1994
  Used by L{LUInstanceSetParams}.
1995

1996
  """
1997
  def __init__(self):
1998
    self.params = None
1999
    self.filled = None
2000

    
2001

    
2002
def _PrepareContainerMods(mods, private_fn):
2003
  """Prepares a list of container modifications by adding a private data field.
2004

2005
  @type mods: list of tuples; (operation, index, parameters)
2006
  @param mods: List of modifications
2007
  @type private_fn: callable or None
2008
  @param private_fn: Callable for constructing a private data field for a
2009
    modification
2010
  @rtype: list
2011

2012
  """
2013
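  # Illustrative example (not part of the original code): with
  #   mods = [(constants.DDM_ADD, -1, {constants.INIC_MAC: constants.VALUE_AUTO})]
  # and private_fn=_InstNicModPrivate, each tuple simply gains a fourth,
  # private element:
  #   [(constants.DDM_ADD, -1, {...}, <_InstNicModPrivate object>)]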
  if private_fn is None:
2014
    fn = lambda: None
2015
  else:
2016
    fn = private_fn
2017

    
2018
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2019

    
2020

    
2021
def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
2022
  """Checks if nodes have enough physical CPUs
2023

2024
  This function checks if all given nodes have the needed number of
2025
  physical CPUs. In case any node has less CPUs or we cannot get the
2026
  information from the node, this function raises an OpPrereqError
2027
  exception.
2028

2029
  @type lu: C{LogicalUnit}
2030
  @param lu: a logical unit from which we get configuration data
2031
  @type nodenames: C{list}
2032
  @param nodenames: the list of node names to check
2033
  @type requested: C{int}
2034
  @param requested: the minimum acceptable number of physical CPUs
2035
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2036
      or we cannot check the node
2037

2038
  """
2039
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2040
  for node in nodenames:
2041
    info = nodeinfo[node]
2042
    info.Raise("Cannot get current information from node %s" % node,
2043
               prereq=True, ecode=errors.ECODE_ENVIRON)
2044
    (_, _, (hv_info, )) = info.payload
2045
    num_cpus = hv_info.get("cpu_total", None)
2046
    if not isinstance(num_cpus, int):
2047
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2048
                                 " on node %s, result was '%s'" %
2049
                                 (node, num_cpus), errors.ECODE_ENVIRON)
2050
    if requested > num_cpus:
2051
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2052
                                 "required" % (node, num_cpus, requested),
2053
                                 errors.ECODE_NORES)
2054

    
2055

    
2056
def GetItemFromContainer(identifier, kind, container):
2057
  """Return the item refered by the identifier.
2058

2059
  @type identifier: string
2060
  @param identifier: Item index or name or UUID
2061
  @type kind: string
2062
  @param kind: One-word item description
2063
  @type container: list
2064
  @param container: Container to get the item from
2065

2066
  """
2067
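  # Illustrative examples (not part of the original code):
  #   GetItemFromContainer("-1", "disk", disks) -> (len(disks) - 1, last disk)
  #   GetItemFromContainer("0", "nic", nics)    -> (0, nics[0])
  #   GetItemFromContainer(<uuid or name>, ...) -> looked up by uuid/name below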
  # Index
2068
  try:
2069
    idx = int(identifier)
2070
    if idx == -1:
2071
      # Append
2072
      absidx = len(container) - 1
2073
    elif idx < 0:
2074
      raise IndexError("Not accepting negative indices other than -1")
2075
    elif idx > len(container):
2076
      raise IndexError("Got %s index %s, but there are only %s" %
2077
                       (kind, idx, len(container)))
2078
    else:
2079
      absidx = idx
2080
    return (absidx, container[idx])
2081
  except ValueError:
2082
    pass
2083

    
2084
  for idx, item in enumerate(container):
2085
    if item.uuid == identifier or item.name == identifier:
2086
      return (idx, item)
2087

    
2088
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2089
                             (kind, identifier), errors.ECODE_NOENT)
2090

    
2091

    
2092
def _ApplyContainerMods(kind, container, chgdesc, mods,
2093
                        create_fn, modify_fn, remove_fn):
2094
  """Applies descriptions in C{mods} to C{container}.
2095

2096
  @type kind: string
2097
  @param kind: One-word item description
2098
  @type container: list
2099
  @param container: Container to modify
2100
  @type chgdesc: None or list
2101
  @param chgdesc: List of applied changes
2102
  @type mods: list
2103
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2104
  @type create_fn: callable
2105
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2106
    receives absolute item index, parameters and private data object as added
2107
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2108
    as list
2109
  @type modify_fn: callable
2110
  @param modify_fn: Callback for modifying an existing item
2111
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2112
    and private data object as added by L{_PrepareContainerMods}, returns
2113
    changes as list
2114
  @type remove_fn: callable
2115
  @param remove_fn: Callback on removing item; receives absolute item index,
2116
    item and private data object as added by L{_PrepareContainerMods}
2117

2118
  """
2119
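  # A change description appended to 'chgdesc' is a ("<kind>/<index>",
  # "<what changed>") tuple, e.g. ("nic/0", "remove"); the callbacks return
  # lists of such tuples for additions and modifications.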
  for (op, identifier, params, private) in mods:
2120
    changes = None
2121

    
2122
    if op == constants.DDM_ADD:
2123
      # Calculate where item will be added
2124
      # When adding an item, identifier can only be an index
2125
      try:
2126
        idx = int(identifier)
2127
      except ValueError:
2128
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2129
                                   " identifier for %s" % constants.DDM_ADD,
2130
                                   errors.ECODE_INVAL)
2131
      if idx == -1:
2132
        addidx = len(container)
2133
      else:
2134
        if idx < 0:
2135
          raise IndexError("Not accepting negative indices other than -1")
2136
        elif idx > len(container):
2137
          raise IndexError("Got %s index %s, but there are only %s" %
2138
                           (kind, idx, len(container)))
2139
        addidx = idx
2140

    
2141
      if create_fn is None:
2142
        item = params
2143
      else:
2144
        (item, changes) = create_fn(addidx, params, private)
2145

    
2146
      if idx == -1:
2147
        container.append(item)
2148
      else:
2149
        assert idx >= 0
2150
        assert idx <= len(container)
2151
        # list.insert does so before the specified index
2152
        container.insert(idx, item)
2153
    else:
2154
      # Retrieve existing item
2155
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2156

    
2157
      if op == constants.DDM_REMOVE:
2158
        assert not params
2159

    
2160
        if remove_fn is not None:
2161
          remove_fn(absidx, item, private)
2162

    
2163
        changes = [("%s/%s" % (kind, absidx), "remove")]
2164

    
2165
        assert container[absidx] == item
2166
        del container[absidx]
2167
      elif op == constants.DDM_MODIFY:
2168
        if modify_fn is not None:
2169
          changes = modify_fn(absidx, item, params, private)
2170
      else:
2171
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2172

    
2173
    assert _TApplyContModsCbChanges(changes)
2174

    
2175
    if not (chgdesc is None or changes is None):
2176
      chgdesc.extend(changes)
2177

    
2178

    
2179
def _UpdateIvNames(base_index, disks):
2180
  """Updates the C{iv_name} attribute of disks.
2181

2182
  @type disks: list of L{objects.Disk}
2183

2184
  """
2185
  for (idx, disk) in enumerate(disks):
2186
    disk.iv_name = "disk/%s" % (base_index + idx, )
2187

    
2188

    
2189
class LUInstanceSetParams(LogicalUnit):
2190
  """Modifies an instances's parameters.
2191

2192
  """
2193
  HPATH = "instance-modify"
2194
  HTYPE = constants.HTYPE_INSTANCE
2195
  REQ_BGL = False
2196

    
2197
  @staticmethod
2198
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2199
    assert ht.TList(mods)
2200
    assert not mods or len(mods[0]) in (2, 3)
2201

    
2202
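    # Old-style, two-element modifications are upgraded to the three-element
    # form, e.g. (illustrative):
    #   ("add", {...})  -> (constants.DDM_ADD, -1, {...})
    #   ("2", {...})    -> (constants.DDM_MODIFY, "2", {...})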
    if mods and len(mods[0]) == 2:
2203
      result = []
2204

    
2205
      addremove = 0
2206
      for op, params in mods:
2207
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2208
          result.append((op, -1, params))
2209
          addremove += 1
2210

    
2211
          if addremove > 1:
2212
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2213
                                       " supported at a time" % kind,
2214
                                       errors.ECODE_INVAL)
2215
        else:
2216
          result.append((constants.DDM_MODIFY, op, params))
2217

    
2218
      assert verify_fn(result)
2219
    else:
2220
      result = mods
2221

    
2222
    return result
2223

    
2224
  @staticmethod
2225
  def _CheckMods(kind, mods, key_types, item_fn):
2226
    """Ensures requested disk/NIC modifications are valid.
2227

2228
    """
2229
    for (op, _, params) in mods:
2230
      assert ht.TDict(params)
2231

    
2232
      # If 'key_types' is an empty dict, we assume we have an
2233
      # 'ext' template and thus do not ForceDictType
2234
      if key_types:
2235
        utils.ForceDictType(params, key_types)
2236

    
2237
      if op == constants.DDM_REMOVE:
2238
        if params:
2239
          raise errors.OpPrereqError("No settings should be passed when"
2240
                                     " removing a %s" % kind,
2241
                                     errors.ECODE_INVAL)
2242
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2243
        item_fn(op, params)
2244
      else:
2245
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2246

    
2247
  @staticmethod
2248
  def _VerifyDiskModification(op, params):
2249
    """Verifies a disk modification.
2250

2251
    """
2252
    if op == constants.DDM_ADD:
2253
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2254
      if mode not in constants.DISK_ACCESS_SET:
2255
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2256
                                   errors.ECODE_INVAL)
2257

    
2258
      size = params.get(constants.IDISK_SIZE, None)
2259
      if size is None:
2260
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2261
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2262

    
2263
      try:
2264
        size = int(size)
2265
      except (TypeError, ValueError), err:
2266
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2267
                                   errors.ECODE_INVAL)
2268

    
2269
      params[constants.IDISK_SIZE] = size
2270
      name = params.get(constants.IDISK_NAME, None)
2271
      if name is not None and name.lower() == constants.VALUE_NONE:
2272
        params[constants.IDISK_NAME] = None
2273

    
2274
    elif op == constants.DDM_MODIFY:
2275
      if constants.IDISK_SIZE in params:
2276
        raise errors.OpPrereqError("Disk size change not possible, use"
2277
                                   " grow-disk", errors.ECODE_INVAL)
2278
      name = params.get(constants.IDISK_NAME, None)
2279
      if name is not None and name.lower() == constants.VALUE_NONE:
2280
        params[constants.IDISK_NAME] = None
2281

    
2282
  @staticmethod
2283
  def _VerifyNicModification(op, params):
2284
    """Verifies a network interface modification.
2285

2286
    """
2287
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2288
      ip = params.get(constants.INIC_IP, None)
2289
      name = params.get(constants.INIC_NAME, None)
2290
      req_net = params.get(constants.INIC_NETWORK, None)
2291
      link = params.get(constants.NIC_LINK, None)
2292
      mode = params.get(constants.NIC_MODE, None)
2293
      if name is not None and name.lower() == constants.VALUE_NONE:
2294
        params[constants.INIC_NAME] = None
2295
      if req_net is not None:
2296
        if req_net.lower() == constants.VALUE_NONE:
2297
          params[constants.INIC_NETWORK] = None
2298
          req_net = None
2299
        elif link is not None or mode is not None:
2300
          raise errors.OpPrereqError("If network is given"
2301
                                     " mode or link should not",
2302
                                     errors.ECODE_INVAL)
2303

    
2304
      if op == constants.DDM_ADD:
2305
        macaddr = params.get(constants.INIC_MAC, None)
2306
        if macaddr is None:
2307
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2308

    
2309
      if ip is not None:
2310
        if ip.lower() == constants.VALUE_NONE:
2311
          params[constants.INIC_IP] = None
2312
        else:
2313
          if ip.lower() == constants.NIC_IP_POOL:
2314
            if op == constants.DDM_ADD and req_net is None:
2315
              raise errors.OpPrereqError("If ip=pool, parameter network"
2316
                                         " cannot be none",
2317
                                         errors.ECODE_INVAL)
2318
          else:
2319
            if not netutils.IPAddress.IsValid(ip):
2320
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2321
                                         errors.ECODE_INVAL)
2322

    
2323
      if constants.INIC_MAC in params:
2324
        macaddr = params[constants.INIC_MAC]
2325
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2326
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2327

    
2328
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2329
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2330
                                     " modifying an existing NIC",
2331
                                     errors.ECODE_INVAL)
2332

    
2333
  def CheckArguments(self):
2334
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2335
            self.op.hvparams or self.op.beparams or self.op.os_name or
2336
            self.op.osparams or self.op.offline is not None or
2337
            self.op.runtime_mem or self.op.pnode):
2338
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2339

    
2340
    if self.op.hvparams:
2341
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2342
                           "hypervisor", "instance", "cluster")
2343

    
2344
    self.op.disks = self._UpgradeDiskNicMods(
2345
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2346
    self.op.nics = self._UpgradeDiskNicMods(
2347
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2348

    
2349
    if self.op.disks and self.op.disk_template is not None:
2350
      raise errors.OpPrereqError("Disk template conversion and other disk"
2351
                                 " changes not supported at the same time",
2352
                                 errors.ECODE_INVAL)
2353

    
2354
    if (self.op.disk_template and
2355
        self.op.disk_template in constants.DTS_INT_MIRROR and
2356
        self.op.remote_node is None):
2357
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2358
                                 " one requires specifying a secondary node",
2359
                                 errors.ECODE_INVAL)
2360

    
2361
    # Check NIC modifications
2362
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2363
                    self._VerifyNicModification)
2364

    
2365
    if self.op.pnode:
2366
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2367

    
2368
  def ExpandNames(self):
2369
    self._ExpandAndLockInstance()
2370
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2371
    # Can't even acquire node locks in shared mode as upcoming changes in
2372
    # Ganeti 2.6 will start to modify the node object on disk conversion
2373
    self.needed_locks[locking.LEVEL_NODE] = []
2374
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2375
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2376
    # Lock the node group (shared) to be able to look up the ipolicy
2377
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2378

    
2379
  def DeclareLocks(self, level):
2380
    if level == locking.LEVEL_NODEGROUP:
2381
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2382
      # Acquire locks for the instance's nodegroups optimistically. Needs
2383
      # to be verified in CheckPrereq
2384
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2385
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2386
    elif level == locking.LEVEL_NODE:
2387
      self._LockInstancesNodes()
2388
      if self.op.disk_template and self.op.remote_node:
2389
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2390
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2391
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2392
      # Copy node locks
2393
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2394
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2395

    
2396
  def BuildHooksEnv(self):
2397
    """Build hooks env.
2398

2399
    This runs on the master, primary and secondaries.
2400

2401
    """
2402
    args = {}
2403
    if constants.BE_MINMEM in self.be_new:
2404
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2405
    if constants.BE_MAXMEM in self.be_new:
2406
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2407
    if constants.BE_VCPUS in self.be_new:
2408
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2409
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2410
    # information at all.
2411

    
2412
    if self._new_nics is not None:
2413
      nics = []
2414

    
2415
      for nic in self._new_nics:
2416
        n = copy.deepcopy(nic)
2417
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2418
        n.nicparams = nicparams
2419
        nics.append(NICToTuple(self, n))
2420

    
2421
      args["nics"] = nics
2422

    
2423
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2424
    if self.op.disk_template:
2425
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2426
    if self.op.runtime_mem:
2427
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2428

    
2429
    return env
2430

    
2431
  def BuildHooksNodes(self):
2432
    """Build hooks nodes.
2433

2434
    """
2435
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2436
    return (nl, nl)
2437

    
2438
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2439
                              old_params, cluster, pnode):
2440

    
2441
    update_params_dict = dict([(key, params[key])
2442
                               for key in constants.NICS_PARAMETERS
2443
                               if key in params])
2444

    
2445
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2446
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2447

    
2448
    new_net_uuid = None
2449
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2450
    if new_net_uuid_or_name:
2451
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2452
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2453

    
2454
    if old_net_uuid:
2455
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2456

    
2457
    if new_net_uuid:
2458
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2459
      if not netparams:
2460
        raise errors.OpPrereqError("No netparams found for the network"
2461
                                   " %s, probably not connected" %
2462
                                   new_net_obj.name, errors.ECODE_INVAL)
2463
      new_params = dict(netparams)
2464
    else:
2465
      new_params = GetUpdatedParams(old_params, update_params_dict)
2466

    
2467
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2468

    
2469
    new_filled_params = cluster.SimpleFillNIC(new_params)
2470
    objects.NIC.CheckParameterSyntax(new_filled_params)
2471

    
2472
    new_mode = new_filled_params[constants.NIC_MODE]
2473
    if new_mode == constants.NIC_MODE_BRIDGED:
2474
      bridge = new_filled_params[constants.NIC_LINK]
2475
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2476
      if msg:
2477
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2478
        if self.op.force:
2479
          self.warn.append(msg)
2480
        else:
2481
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2482

    
2483
    elif new_mode == constants.NIC_MODE_ROUTED:
2484
      ip = params.get(constants.INIC_IP, old_ip)
2485
      if ip is None:
2486
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2487
                                   " on a routed NIC", errors.ECODE_INVAL)
2488

    
2489
    elif new_mode == constants.NIC_MODE_OVS:
2490
      # TODO: check OVS link
2491
      self.LogInfo("OVS links are currently not checked for correctness")
2492

    
2493
    if constants.INIC_MAC in params:
2494
      mac = params[constants.INIC_MAC]
2495
      if mac is None:
2496
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2497
                                   errors.ECODE_INVAL)
2498
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2499
        # otherwise generate the MAC address
2500
        params[constants.INIC_MAC] = \
2501
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2502
      else:
2503
        # or validate/reserve the current one
2504
        try:
2505
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2506
        except errors.ReservationError:
2507
          raise errors.OpPrereqError("MAC address '%s' already in use"
2508
                                     " in cluster" % mac,
2509
                                     errors.ECODE_NOTUNIQUE)
2510
    elif new_net_uuid != old_net_uuid:
2511

    
2512
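      # The NIC is moving to a different network: keep the MAC only if both
      # networks use the same MAC prefix, otherwise generate a fresh one below.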
      def get_net_prefix(net_uuid):
2513
        mac_prefix = None
2514
        if net_uuid:
2515
          nobj = self.cfg.GetNetwork(net_uuid)
2516
          mac_prefix = nobj.mac_prefix
2517

    
2518
        return mac_prefix
2519

    
2520
      new_prefix = get_net_prefix(new_net_uuid)
2521
      old_prefix = get_net_prefix(old_net_uuid)
2522
      if old_prefix != new_prefix:
2523
        params[constants.INIC_MAC] = \
2524
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2525

    
2526
    # if there is a change in (ip, network) tuple
2527
    new_ip = params.get(constants.INIC_IP, old_ip)
2528
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2529
      if new_ip:
2530
        # if IP is pool then require a network and generate one IP
2531
        if new_ip.lower() == constants.NIC_IP_POOL:
2532
          if new_net_uuid:
2533
            try:
2534
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2535
            except errors.ReservationError:
2536
              raise errors.OpPrereqError("Unable to get a free IP"
2537
                                         " from the address pool",
2538
                                         errors.ECODE_STATE)
2539
            self.LogInfo("Chose IP %s from network %s",
2540
                         new_ip,
2541
                         new_net_obj.name)
2542
            params[constants.INIC_IP] = new_ip
2543
          else:
2544
            raise errors.OpPrereqError("ip=pool, but no network found",
2545
                                       errors.ECODE_INVAL)
2546
        # Reserve the new IP in the new network, if any
2547
        elif new_net_uuid:
2548
          try:
2549
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2550
                               check=self.op.conflicts_check)
2551
            self.LogInfo("Reserving IP %s in network %s",
2552
                         new_ip, new_net_obj.name)
2553
          except errors.ReservationError:
2554
            raise errors.OpPrereqError("IP %s not available in network %s" %
2555
                                       (new_ip, new_net_obj.name),
2556
                                       errors.ECODE_NOTUNIQUE)
2557
        # new network is None so check if new IP is a conflicting IP
2558
        elif self.op.conflicts_check:
2559
          _CheckForConflictingIp(self, new_ip, pnode)
2560

    
2561
      # release old IP if old network is not None
2562
      if old_ip and old_net_uuid:
2563
        try:
2564
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2565
        except errors.AddressPoolError:
2566
          logging.warning("Release IP %s not contained in network %s",
2567
                          old_ip, old_net_obj.name)
2568

    
2569
    # there are no changes in (ip, network) tuple and old network is not None
2570
    elif (old_net_uuid is not None and
2571
          (req_link is not None or req_mode is not None)):
2572
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2573
                                 " a NIC that is connected to a network",
2574
                                 errors.ECODE_INVAL)
2575

    
2576
    private.params = new_params
2577
    private.filled = new_filled_params
2578

    
2579
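  # Illustrative note: a NIC modification request as handled above is a plain
  # parameter dictionary.  For example (values are made up and only meant to
  # show the shape of the data, not a real cluster setup):
  #
  #   params = {
  #     constants.INIC_IP: constants.NIC_IP_POOL,  # "pool" -> allocate from net
  #     constants.INIC_NETWORK: "net1",
  #   }
  #
  # With ip=pool and a network attached, the code above asks the configuration
  # for a free address via self.cfg.GenerateIp() and reserves it under the
  # current execution context id (self.proc.GetECId()).
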
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    instance = self.instance
    pnode = instance.primary_node
    cluster = self.cluster
    if instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 instance.disk_template, errors.ECODE_INVAL)

    if (instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node == pnode:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node)
      CheckNodeNotDrained(self, self.op.remote_node)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

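  # For reference, the conversions accepted by the check above are exactly the
  # (old, new) pairs listed in _DISK_CONVERSIONS at the end of this class,
  # i.e. plain <-> drbd.  Assuming the standard command-line tools, a
  # conversion is typically driven along these lines (instance and node names
  # are placeholders):
  #
  #   gnt-instance shutdown inst1.example.com
  #   gnt-instance modify -t drbd -n node2.example.com inst1.example.com
  #   gnt-instance startup inst1.example.com
  #
  # where "-n" supplies self.op.remote_node, the node that will hold the new
  # DRBD secondary.
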
  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    This checks the requested parameter modifications against the instance's
    current configuration and the state of the cluster.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode = instance.primary_node

    self.warn = []

    if (self.op.pnode is not None and self.op.pnode != pnode and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
                                   errors.ECODE_STATE)

    assert pnode in self.owned_locks(locking.LEVEL_NODE)
    nodelist = list(instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode)
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug or self.op.hotplug_if_possible:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      if result.fail_msg:
        if self.op.hotplug:
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
                       prereq=True)
        else:
          self.LogWarning(result.fail_msg)
          self.op.hotplug = False
          self.LogInfo("Modification will take place without hotplugging.")
      else:
        self.op.hotplug = True

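    # Note on the two flags checked above: with plain hotplug the operation
    # aborts here if the node/hypervisor cannot hotplug, while with
    # hotplug_if_possible the modification silently degrades to a plain
    # configuration change that becomes visible at the next restart.
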
    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    if instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {},
                      self._VerifyDiskModification)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      self._VerifyDiskModification)

    # Prepare disk/NIC modifications
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

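    # Both lists hold (op, identifier, params, private) tuples, where op is
    # one of constants.DDM_ADD / DDM_MODIFY / DDM_REMOVE; the code below and
    # _ApplyContainerMods rely on that shape.  A hypothetical entry for
    # "add a 10 GiB disk" would look roughly like
    # (constants.DDM_ADD, -1, {constants.IDISK_SIZE: 10240}, None).
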
    # Check the validity of the `provider' parameter
    if instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
                                              instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
    be_old = cluster.FillBE(instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
                                max_requested_cpu + 1, instance.hypervisor)

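    # Worked example for the check above (hypothetical values): with
    # cpu_mask="1:2-3:4,6" ParseMultiCpuMask yields three entries, so the
    # instance must have exactly 3 vCPUs, and since the highest physical CPU
    # referenced is 6, every node the instance can run on needs at least
    # 7 physical CPUs (max_requested_cpu + 1).
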
    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         [instance.hypervisor], False)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" % pnode)
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" % node,
                     prereq=True, ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" % node,
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" % node,
                                       errors.ECODE_STATE)

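    # Numeric illustration of the maxmem check above (made-up figures): if
    # maxmem is raised to 4096 MB while the running instance currently uses
    # 1024 MB and the primary node reports 2048 MB free, then
    # miss_mem = 4096 - 1024 - 2048 = 1024 > 0 and the request is rejected
    # unless the force flag is given.
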
    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node)
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(self, instance.primary_node,
                            "ballooning memory for instance %s" %
                            instance.name, delta, instance.hypervisor)

    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

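  # Overview of the plain -> drbd conversion implemented below: new DRBD disk
  # objects are generated for the existing volumes, the missing meta/data LVs
  # are created on both nodes, the original LVs are renamed so they become the
  # DRBD data devices, the DRBD devices themselves are assembled, the
  # configuration is updated and finally the mirrors are left to resync.
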
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    assert instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     instance.name, pnode, [snode],
                                     disk_info, None, None, 0, feedback_fn,
                                     self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
    info = GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode, instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
          f_create = node == pnode
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
                               excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance

    assert len(instance.secondary_nodes) == 1
    assert instance.disk_template == constants.DT_DRBD8

    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
    new_disks = [d.children[0] for d in instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, instance.disks)
    self.cfg.Update(instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)

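  # The hotplug helpers below are deliberately best-effort: a failing hotplug
  # RPC only logs a warning and execution continues, so the configuration
  # change is still recorded and simply takes effect at the next restart of
  # the instance.
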
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
    self.LogInfo("Trying to hotplug device...")
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
    else:
      self.LogInfo("Hotplug done.")

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    instance = self.instance

    # add a new disk
    if instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
                           instance.primary_node, instance.secondary_nodes,
                           [params], file_path, file_driver, idx,
                           self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    if self.op.hotplug:
      # _, device_info = AssembleInstanceDisks(self, self.instance,
      #                                       [disk], check=False)
      self.cfg.SetDiskID(disk, self.instance.primary_node)
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        # _, _, dev_path = device_info[0]
        _, link_name = result.payload
        self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                            constants.HOTPLUG_TARGET_DISK,
                            disk, link_name, idx)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  def _ModifyDisk(self, idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    for key, value in params.iteritems():
      if key in [constants.IDISK_MODE, constants.IDISK_NAME]:
        setattr(disk, key, value)
        changes.append(("disk.%s/%d" % (key, idx), value))
      elif self.instance.disk_template == constants.DT_EXT:
        disk.params[key] = value
        changes.append(("disk.params:%s/%d" % (key, idx), value))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    if self.op.hotplug:
      self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                          constants.HOTPLUG_TARGET_DISK,
                          root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
      if self.op.keep_disks and disk.dev_type in constants.DT_EXT:
        continue
      self.cfg.SetDiskID(disk, node)
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx, node, msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    if self.op.hotplug:
      self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                          constants.HOTPLUG_TARGET_NIC,
                          nobj, None, idx)

    desc = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    return (nobj, desc)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                          constants.HOTPLUG_TARGET_NIC,
                          nic, None, idx)

    return changes

  def _RemoveNic(self, idx, nic, _):
    if self.op.hotplug:
      self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                          constants.HOTPLUG_TARGET_NIC,
                          nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    Unless hotplugging or runtime memory ballooning is used, all parameters
    take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []
    instance = self.instance

    # New primary node
    if self.op.pnode:
      instance.primary_node = self.op.pnode

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
                                                     instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(instance.all_nodes)
        if self.op.remote_node:
          check_nodes.add(self.op.remote_node)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


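# LUInstanceChangeGroup below does not move the instance itself; it asks the
# configured iallocator for a "change-group" solution and returns the
# resulting jobs for the job queue to execute.  Assuming the standard CLI,
# it is typically reached via something like:
#
#   gnt-instance change-group --to <target-group> <instance>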
class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_name
                        for group in lock_groups
                        for node_name in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)