lib/cmdlib/instance.py @ 4665b94f
1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with instances."""
23

    
24
import OpenSSL
25
import copy
26
import logging
27
import os
28

    
29
from ganeti import compat
30
from ganeti import constants
31
from ganeti import errors
32
from ganeti import ht
33
from ganeti import hypervisor
34
from ganeti import locking
35
from ganeti.masterd import iallocator
36
from ganeti import masterd
37
from ganeti import netutils
38
from ganeti import objects
39
from ganeti import opcodes
40
from ganeti import pathutils
41
from ganeti import rpc
42
from ganeti import utils
43

    
44
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45

    
46
from ganeti.cmdlib.common import INSTANCE_DOWN, \
47
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
53
from ganeti.cmdlib.instance_storage import CreateDisks, \
54
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
56
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
58
  CheckSpindlesExclusiveStorage
59
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
60
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
61
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
62
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
63
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
64
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
65

    
66
import ganeti.masterd.instance
67

    
68

    
69
#: Type description for changes as returned by L{_ApplyContainerMods}'s
70
#: callbacks
71
_TApplyContModsCbChanges = \
72
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
73
    ht.TNonEmptyString,
74
    ht.TAny,
75
    ])))
76

    
77

    
78
def _CheckHostnameSane(lu, name):
79
  """Ensures that a given hostname resolves to a 'sane' name.
80

81
  The given name is required to be a prefix of the resolved hostname,
82
  to prevent accidental mismatches.
83

84
  @param lu: the logical unit on behalf of which we're checking
85
  @param name: the name we should resolve and check
86
  @return: the resolved hostname object
87

88
  """
89
  hostname = netutils.GetHostname(name=name)
90
  if hostname.name != name:
91
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
92
  if not utils.MatchNameComponent(name, [hostname.name]):
93
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
94
                                " same as given hostname '%s'") %
95
                               (hostname.name, name), errors.ECODE_INVAL)
96
  return hostname
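
# Illustrative usage sketch (editor's note, not part of the original module):
# when name_check is enabled, LUInstanceCreate.CheckArguments uses this helper
# roughly as follows (see CheckArguments further below):
#
#   self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
#   self.op.instance_name = self.hostname1.name
#   self.check_ip = self.hostname1.ip   # later used for the ip_check ping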
97

    
98

    
99
def _CheckOpportunisticLocking(op):
100
  """Generate error if opportunistic locking is not possible.
101

102
  """
103
  if op.opportunistic_locking and not op.iallocator:
104
    raise errors.OpPrereqError("Opportunistic locking is only available in"
105
                               " combination with an instance allocator",
106
                               errors.ECODE_INVAL)
107

    
108

    
109
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
110
  """Wrapper around IAReqInstanceAlloc.
111

112
  @param op: The instance opcode
113
  @param disks: The computed disks
114
  @param nics: The computed nics
115
  @param beparams: The fully filled beparams
116
  @param node_name_whitelist: List of nodes which should appear as online to the
117
    allocator (unless the node is already marked offline)
118

119
  @returns: A filled L{iallocator.IAReqInstanceAlloc}
120

121
  """
122
  spindle_use = beparams[constants.BE_SPINDLE_USE]
123
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
124
                                       disk_template=op.disk_template,
125
                                       tags=op.tags,
126
                                       os=op.os_type,
127
                                       vcpus=beparams[constants.BE_VCPUS],
128
                                       memory=beparams[constants.BE_MAXMEM],
129
                                       spindle_use=spindle_use,
130
                                       disks=disks,
131
                                       nics=[n.ToDict() for n in nics],
132
                                       hypervisor=op.hypervisor,
133
                                       node_whitelist=node_name_whitelist)
134

    
135

    
136
def _ComputeFullBeParams(op, cluster):
137
  """Computes the full beparams.
138

139
  @param op: The instance opcode
140
  @param cluster: The cluster config object
141

142
  @return: The fully filled beparams
143

144
  """
145
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
146
  for param, value in op.beparams.iteritems():
147
    if value == constants.VALUE_AUTO:
148
      op.beparams[param] = default_beparams[param]
149
  objects.UpgradeBeParams(op.beparams)
150
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
151
  return cluster.SimpleFillBE(op.beparams)
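
# Illustrative sketch (hypothetical values, not in the original source): with
# cluster defaults of, say, {BE_VCPUS: 1, BE_MAXMEM: 1024}, an opcode carrying
# beparams={BE_VCPUS: "auto"} has the "auto" entry replaced by the cluster
# default before UpgradeBeParams/ForceDictType run, and SimpleFillBE then
# fills in every remaining default, so the returned dict is fully populated.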
152

    
153

    
154
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
155
  """Computes the nics.
156

157
  @param op: The instance opcode
158
  @param cluster: Cluster configuration object
159
  @param default_ip: The default ip to assign
160
  @param cfg: An instance of the configuration object
161
  @param ec_id: Execution context ID
162

163
  @returns: The built-up NICs
164

165
  """
166
  nics = []
167
  for nic in op.nics:
168
    nic_mode_req = nic.get(constants.INIC_MODE, None)
169
    nic_mode = nic_mode_req
170
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
171
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
172

    
173
    net = nic.get(constants.INIC_NETWORK, None)
174
    link = nic.get(constants.NIC_LINK, None)
175
    ip = nic.get(constants.INIC_IP, None)
176

    
177
    if net is None or net.lower() == constants.VALUE_NONE:
178
      net = None
179
    else:
180
      if nic_mode_req is not None or link is not None:
181
        raise errors.OpPrereqError("If network is given, no mode or link"
182
                                   " is allowed to be passed",
183
                                   errors.ECODE_INVAL)
184

    
185
    # ip validity checks
186
    if ip is None or ip.lower() == constants.VALUE_NONE:
187
      nic_ip = None
188
    elif ip.lower() == constants.VALUE_AUTO:
189
      if not op.name_check:
190
        raise errors.OpPrereqError("IP address set to auto but name checks"
191
                                   " have been skipped",
192
                                   errors.ECODE_INVAL)
193
      nic_ip = default_ip
194
    else:
195
      # We defer pool operations until later, so that the iallocator has
196
      # filled in the instance's node(s)
197
      if ip.lower() == constants.NIC_IP_POOL:
198
        if net is None:
199
          raise errors.OpPrereqError("if ip=pool, parameter network"
200
                                     " must be passed too",
201
                                     errors.ECODE_INVAL)
202

    
203
      elif not netutils.IPAddress.IsValid(ip):
204
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
205
                                   errors.ECODE_INVAL)
206

    
207
      nic_ip = ip
208

    
209
    # TODO: check the ip address for uniqueness
210
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
211
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
212
                                 errors.ECODE_INVAL)
213

    
214
    # MAC address verification
215
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
216
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
217
      mac = utils.NormalizeAndValidateMac(mac)
218

    
219
      try:
220
        # TODO: We need to factor this out
221
        cfg.ReserveMAC(mac, ec_id)
222
      except errors.ReservationError:
223
        raise errors.OpPrereqError("MAC address %s already in use"
224
                                   " in cluster" % mac,
225
                                   errors.ECODE_NOTUNIQUE)
226

    
227
    #  Build nic parameters
228
    nicparams = {}
229
    if nic_mode_req:
230
      nicparams[constants.NIC_MODE] = nic_mode
231
    if link:
232
      nicparams[constants.NIC_LINK] = link
233

    
234
    check_params = cluster.SimpleFillNIC(nicparams)
235
    objects.NIC.CheckParameterSyntax(check_params)
236
    net_uuid = cfg.LookupNetwork(net)
237
    name = nic.get(constants.INIC_NAME, None)
238
    if name is not None and name.lower() == constants.VALUE_NONE:
239
      name = None
240
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
241
                          network=net_uuid, nicparams=nicparams)
242
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
243
    nics.append(nic_obj)
244

    
245
  return nics
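
# Illustrative examples (hypothetical values, not part of the original source)
# of how single NIC specs are interpreted above:
#   {constants.INIC_IP: "auto"}      -> uses default_ip (requires name_check)
#   {constants.INIC_IP: "pool",
#    constants.INIC_NETWORK: "net1"} -> IP assignment deferred to the address
#                                       pool, resolved later in CheckPrereq
#   {constants.INIC_MAC: "aa:00:00:35:ac:1b"} -> validated and reserved via
#                                                cfg.ReserveMAC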
246

    
247

    
248
def _CheckForConflictingIp(lu, ip, node_uuid):
249
  """In case of conflicting IP address raise error.
250

251
  @type ip: string
252
  @param ip: IP address
253
  @type node_uuid: string
254
  @param node_uuid: node UUID
255

256
  """
257
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
258
  if conf_net is not None:
259
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
260
                                " network %s, but the target NIC does not." %
261
                                (ip, conf_net)),
262
                               errors.ECODE_STATE)
263

    
264
  return (None, None)
265

    
266

    
267
def _ComputeIPolicyInstanceSpecViolation(
268
  ipolicy, instance_spec, disk_template,
269
  _compute_fn=ComputeIPolicySpecViolation):
270
  """Compute if instance specs meets the specs of ipolicy.
271

272
  @type ipolicy: dict
273
  @param ipolicy: The ipolicy to verify against
274
  @type instance_spec: dict
275
  @param instance_spec: The instance spec to verify
276
  @type disk_template: string
277
  @param disk_template: the disk template of the instance
278
  @param _compute_fn: The function to verify ipolicy (unittest only)
279
  @see: L{ComputeIPolicySpecViolation}
280

281
  """
282
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
283
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
284
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
285
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
286
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
287
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
288

    
289
  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
290
                     disk_sizes, spindle_use, disk_template)
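
# Illustrative sketch (hypothetical values, not from the original source):
# CheckPrereq below builds the instance_spec dict in exactly this shape, e.g.
#   ispec = {constants.ISPEC_MEM_SIZE: 1024, constants.ISPEC_CPU_COUNT: 2,
#            constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [10240],
#            constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}
# and any returned violations abort creation unless ignore_ipolicy is set.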
291

    
292

    
293
def _CheckOSVariant(os_obj, name):
294
  """Check whether an OS name conforms to the os variants specification.
295

296
  @type os_obj: L{objects.OS}
297
  @param os_obj: OS object to check
298
  @type name: string
299
  @param name: OS name passed by the user, to check for validity
300

301
  """
302
  variant = objects.OS.GetVariant(name)
303
  if not os_obj.supported_variants:
304
    if variant:
305
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
306
                                 " passed)" % (os_obj.name, variant),
307
                                 errors.ECODE_INVAL)
308
    return
309
  if not variant:
310
    raise errors.OpPrereqError("OS name must include a variant",
311
                               errors.ECODE_INVAL)
312

    
313
  if variant not in os_obj.supported_variants:
314
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
315

    
316

    
317
class LUInstanceCreate(LogicalUnit):
318
  """Create an instance.
319

320
  """
321
  HPATH = "instance-add"
322
  HTYPE = constants.HTYPE_INSTANCE
323
  REQ_BGL = False
324

    
325
  def CheckArguments(self):
326
    """Check arguments.
327

328
    """
329
    # do not require name_check to ease forward/backward compatibility
330
    # for tools
331
    if self.op.no_install and self.op.start:
332
      self.LogInfo("No-installation mode selected, disabling startup")
333
      self.op.start = False
334
    # validate/normalize the instance name
335
    self.op.instance_name = \
336
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
337

    
338
    if self.op.ip_check and not self.op.name_check:
339
      # TODO: make the ip check more flexible and not depend on the name check
340
      raise errors.OpPrereqError("Cannot do IP address check without a name"
341
                                 " check", errors.ECODE_INVAL)
342

    
343
    # check nics' parameter names
344
    for nic in self.op.nics:
345
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
346
    # check that NIC's parameters names are unique and valid
347
    utils.ValidateDeviceNames("NIC", self.op.nics)
348

    
349
    # check that disk's names are unique and valid
350
    utils.ValidateDeviceNames("disk", self.op.disks)
351

    
352
    cluster = self.cfg.GetClusterInfo()
353
    if not self.op.disk_template in cluster.enabled_disk_templates:
354
      raise errors.OpPrereqError("Cannot create an instance with disk template"
355
                                 " '%s', because it is not enabled in the"
356
                                 " cluster. Enabled disk templates are: %s." %
357
                                 (self.op.disk_template,
358
                                  ",".join(cluster.enabled_disk_templates)))
359

    
360
    # check disks. parameter names and consistent adopt/no-adopt strategy
361
    has_adopt = has_no_adopt = False
362
    for disk in self.op.disks:
363
      if self.op.disk_template != constants.DT_EXT:
364
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
365
      if constants.IDISK_ADOPT in disk:
366
        has_adopt = True
367
      else:
368
        has_no_adopt = True
369
    if has_adopt and has_no_adopt:
370
      raise errors.OpPrereqError("Either all disks are adopted or none is",
371
                                 errors.ECODE_INVAL)
372
    if has_adopt:
373
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
374
        raise errors.OpPrereqError("Disk adoption is not supported for the"
375
                                   " '%s' disk template" %
376
                                   self.op.disk_template,
377
                                   errors.ECODE_INVAL)
378
      if self.op.iallocator is not None:
379
        raise errors.OpPrereqError("Disk adoption not allowed with an"
380
                                   " iallocator script", errors.ECODE_INVAL)
381
      if self.op.mode == constants.INSTANCE_IMPORT:
382
        raise errors.OpPrereqError("Disk adoption not allowed for"
383
                                   " instance import", errors.ECODE_INVAL)
384
    else:
385
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
386
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
387
                                   " but no 'adopt' parameter given" %
388
                                   self.op.disk_template,
389
                                   errors.ECODE_INVAL)
390

    
391
    self.adopt_disks = has_adopt
392

    
393
    # instance name verification
394
    if self.op.name_check:
395
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
396
      self.op.instance_name = self.hostname1.name
397
      # used in CheckPrereq for ip ping check
398
      self.check_ip = self.hostname1.ip
399
    else:
400
      self.check_ip = None
401

    
402
    # file storage checks
403
    if (self.op.file_driver and
404
        not self.op.file_driver in constants.FILE_DRIVER):
405
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
406
                                 self.op.file_driver, errors.ECODE_INVAL)
407

    
408
    if self.op.disk_template == constants.DT_FILE:
409
      opcodes.RequireFileStorage()
410
    elif self.op.disk_template == constants.DT_SHARED_FILE:
411
      opcodes.RequireSharedFileStorage()
412

    
413
    ### Node/iallocator related checks
414
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
415

    
416
    if self.op.pnode is not None:
417
      if self.op.disk_template in constants.DTS_INT_MIRROR:
418
        if self.op.snode is None:
419
          raise errors.OpPrereqError("The networked disk templates need"
420
                                     " a mirror node", errors.ECODE_INVAL)
421
      elif self.op.snode:
422
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
423
                        " template")
424
        self.op.snode = None
425

    
426
    _CheckOpportunisticLocking(self.op)
427

    
428
    self._cds = GetClusterDomainSecret()
429

    
430
    if self.op.mode == constants.INSTANCE_IMPORT:
431
      # On import force_variant must be True, because if we forced it at
432
      # initial install, our only chance when importing it back is that it
433
      # works again!
434
      self.op.force_variant = True
435

    
436
      if self.op.no_install:
437
        self.LogInfo("No-installation mode has no effect during import")
438

    
439
    elif self.op.mode == constants.INSTANCE_CREATE:
440
      if self.op.os_type is None:
441
        raise errors.OpPrereqError("No guest OS specified",
442
                                   errors.ECODE_INVAL)
443
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
444
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
445
                                   " installation" % self.op.os_type,
446
                                   errors.ECODE_STATE)
447
      if self.op.disk_template is None:
448
        raise errors.OpPrereqError("No disk template specified",
449
                                   errors.ECODE_INVAL)
450

    
451
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
452
      # Check handshake to ensure both clusters have the same domain secret
453
      src_handshake = self.op.source_handshake
454
      if not src_handshake:
455
        raise errors.OpPrereqError("Missing source handshake",
456
                                   errors.ECODE_INVAL)
457

    
458
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
459
                                                           src_handshake)
460
      if errmsg:
461
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
462
                                   errors.ECODE_INVAL)
463

    
464
      # Load and check source CA
465
      self.source_x509_ca_pem = self.op.source_x509_ca
466
      if not self.source_x509_ca_pem:
467
        raise errors.OpPrereqError("Missing source X509 CA",
468
                                   errors.ECODE_INVAL)
469

    
470
      try:
471
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
472
                                                    self._cds)
473
      except OpenSSL.crypto.Error, err:
474
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
475
                                   (err, ), errors.ECODE_INVAL)
476

    
477
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
478
      if errcode is not None:
479
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
480
                                   errors.ECODE_INVAL)
481

    
482
      self.source_x509_ca = cert
483

    
484
      src_instance_name = self.op.source_instance_name
485
      if not src_instance_name:
486
        raise errors.OpPrereqError("Missing source instance name",
487
                                   errors.ECODE_INVAL)
488

    
489
      self.source_instance_name = \
490
        netutils.GetHostname(name=src_instance_name).name
491

    
492
    else:
493
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
494
                                 self.op.mode, errors.ECODE_INVAL)
495

    
496
  def ExpandNames(self):
497
    """ExpandNames for CreateInstance.
498

499
    Figure out the right locks for instance creation.
500

501
    """
502
    self.needed_locks = {}
503

    
504
    # this is just a preventive check, but someone might still add this
505
    # instance in the meantime, and creation will fail at lock-add time
506
    if self.op.instance_name in self.cfg.GetInstanceList():
507
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
508
                                 self.op.instance_name, errors.ECODE_EXISTS)
509

    
510
    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
511

    
512
    if self.op.iallocator:
513
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
514
      # specifying a group on instance creation and then selecting nodes from
515
      # that group
516
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
517
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
518

    
519
      if self.op.opportunistic_locking:
520
        self.opportunistic_locks[locking.LEVEL_NODE] = True
521
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
522
    else:
523
      (self.op.pnode_uuid, self.op.pnode) = \
524
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
525
      nodelist = [self.op.pnode_uuid]
526
      if self.op.snode is not None:
527
        (self.op.snode_uuid, self.op.snode) = \
528
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
529
        nodelist.append(self.op.snode_uuid)
530
      self.needed_locks[locking.LEVEL_NODE] = nodelist
531

    
532
    # in case of import lock the source node too
533
    if self.op.mode == constants.INSTANCE_IMPORT:
534
      src_node = self.op.src_node
535
      src_path = self.op.src_path
536

    
537
      if src_path is None:
538
        self.op.src_path = src_path = self.op.instance_name
539

    
540
      if src_node is None:
541
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
542
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
543
        self.op.src_node = None
544
        if os.path.isabs(src_path):
545
          raise errors.OpPrereqError("Importing an instance from a path"
546
                                     " requires a source node option",
547
                                     errors.ECODE_INVAL)
548
      else:
549
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
550
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
551
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
552
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
553
        if not os.path.isabs(src_path):
554
          self.op.src_path = src_path = \
555
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
556

    
557
    self.needed_locks[locking.LEVEL_NODE_RES] = \
558
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
559

    
560
  def _RunAllocator(self):
561
    """Run the allocator based on input opcode.
562

563
    """
564
    if self.op.opportunistic_locking:
565
      # Only consider nodes for which a lock is held
566
      node_name_whitelist = self.cfg.GetNodeNames(
567
        self.owned_locks(locking.LEVEL_NODE))
568
    else:
569
      node_name_whitelist = None
570

    
571
    #TODO Export network to iallocator so that it chooses a pnode
572
    #     in a nodegroup that has the desired network connected to
573
    req = _CreateInstanceAllocRequest(self.op, self.disks,
574
                                      self.nics, self.be_full,
575
                                      node_name_whitelist)
576
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
577

    
578
    ial.Run(self.op.iallocator)
579

    
580
    if not ial.success:
581
      # When opportunistic locks are used only a temporary failure is generated
582
      if self.op.opportunistic_locking:
583
        ecode = errors.ECODE_TEMP_NORES
584
      else:
585
        ecode = errors.ECODE_NORES
586

    
587
      raise errors.OpPrereqError("Can't compute nodes using"
588
                                 " iallocator '%s': %s" %
589
                                 (self.op.iallocator, ial.info),
590
                                 ecode)
591

    
592
    (self.op.pnode_uuid, self.op.pnode) = \
593
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
594
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
595
                 self.op.instance_name, self.op.iallocator,
596
                 utils.CommaJoin(ial.result))
597

    
598
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
599

    
600
    if req.RequiredNodes() == 2:
601
      (self.op.snode_uuid, self.op.snode) = \
602
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
603

    
604
  def BuildHooksEnv(self):
605
    """Build hooks env.
606

607
    This runs on master, primary and secondary nodes of the instance.
608

609
    """
610
    env = {
611
      "ADD_MODE": self.op.mode,
612
      }
613
    if self.op.mode == constants.INSTANCE_IMPORT:
614
      env["SRC_NODE"] = self.op.src_node
615
      env["SRC_PATH"] = self.op.src_path
616
      env["SRC_IMAGES"] = self.src_images
617

    
618
    env.update(BuildInstanceHookEnv(
619
      name=self.op.instance_name,
620
      primary_node_name=self.op.pnode,
621
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
622
      status=self.op.start,
623
      os_type=self.op.os_type,
624
      minmem=self.be_full[constants.BE_MINMEM],
625
      maxmem=self.be_full[constants.BE_MAXMEM],
626
      vcpus=self.be_full[constants.BE_VCPUS],
627
      nics=NICListToTuple(self, self.nics),
628
      disk_template=self.op.disk_template,
629
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
630
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
631
             for d in self.disks],
632
      bep=self.be_full,
633
      hvp=self.hv_full,
634
      hypervisor_name=self.op.hypervisor,
635
      tags=self.op.tags,
636
      ))
637

    
638
    return env
639

    
640
  def BuildHooksNodes(self):
641
    """Build hooks nodes.
642

643
    """
644
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
645
    return nl, nl
646

    
647
  def _ReadExportInfo(self):
648
    """Reads the export information from disk.
649

650
    It will override the opcode source node and path with the actual
651
    information, if these two were not specified before.
652

653
    @return: the export information
654

655
    """
656
    assert self.op.mode == constants.INSTANCE_IMPORT
657

    
658
    if self.op.src_node_uuid is None:
659
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
660
      exp_list = self.rpc.call_export_list(locked_nodes)
661
      found = False
662
      for node in exp_list:
663
        if exp_list[node].fail_msg:
664
          continue
665
        if self.op.src_path in exp_list[node].payload:
666
          found = True
667
          self.op.src_node = node
668
          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
669
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
670
                                            self.op.src_path)
671
          break
672
      if not found:
673
        raise errors.OpPrereqError("No export found for relative path %s" %
674
                                   self.op.src_path, errors.ECODE_INVAL)
675

    
676
    CheckNodeOnline(self, self.op.src_node_uuid)
677
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
678
    result.Raise("No export or invalid export found in dir %s" %
679
                 self.op.src_path)
680

    
681
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
682
    if not export_info.has_section(constants.INISECT_EXP):
683
      raise errors.ProgrammerError("Corrupted export config",
684
                                   errors.ECODE_ENVIRON)
685

    
686
    ei_version = export_info.get(constants.INISECT_EXP, "version")
687
    if int(ei_version) != constants.EXPORT_VERSION:
688
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
689
                                 (ei_version, constants.EXPORT_VERSION),
690
                                 errors.ECODE_ENVIRON)
691
    return export_info
692

    
693
  def _ReadExportParams(self, einfo):
694
    """Use export parameters as defaults.
695

696
    In case the opcode doesn't specify (as in override) some instance
697
    parameters, then try to use them from the export information, if
698
    that declares them.
699

700
    """
701
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
702

    
703
    if self.op.disk_template is None:
704
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
705
        self.op.disk_template = einfo.get(constants.INISECT_INS,
706
                                          "disk_template")
707
        if self.op.disk_template not in constants.DISK_TEMPLATES:
708
          raise errors.OpPrereqError("Disk template specified in configuration"
709
                                     " file is not one of the allowed values:"
710
                                     " %s" %
711
                                     " ".join(constants.DISK_TEMPLATES),
712
                                     errors.ECODE_INVAL)
713
      else:
714
        raise errors.OpPrereqError("No disk template specified and the export"
715
                                   " is missing the disk_template information",
716
                                   errors.ECODE_INVAL)
717

    
718
    if not self.op.disks:
719
      disks = []
720
      # TODO: import the disk iv_name too
721
      for idx in range(constants.MAX_DISKS):
722
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
723
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
724
          disks.append({constants.IDISK_SIZE: disk_sz})
725
      self.op.disks = disks
726
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
727
        raise errors.OpPrereqError("No disk info specified and the export"
728
                                   " is missing the disk information",
729
                                   errors.ECODE_INVAL)
730

    
731
    if not self.op.nics:
732
      nics = []
733
      for idx in range(constants.MAX_NICS):
734
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
735
          ndict = {}
736
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
737
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
738
            ndict[name] = v
739
          nics.append(ndict)
740
        else:
741
          break
742
      self.op.nics = nics
743

    
744
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
745
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
746

    
747
    if (self.op.hypervisor is None and
748
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
749
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
750

    
751
    if einfo.has_section(constants.INISECT_HYP):
752
      # use the export parameters but do not override the ones
753
      # specified by the user
754
      for name, value in einfo.items(constants.INISECT_HYP):
755
        if name not in self.op.hvparams:
756
          self.op.hvparams[name] = value
757

    
758
    if einfo.has_section(constants.INISECT_BEP):
759
      # use the parameters, without overriding
760
      for name, value in einfo.items(constants.INISECT_BEP):
761
        if name not in self.op.beparams:
762
          self.op.beparams[name] = value
763
        # Compatibility for the old "memory" be param
764
        if name == constants.BE_MEMORY:
765
          if constants.BE_MAXMEM not in self.op.beparams:
766
            self.op.beparams[constants.BE_MAXMEM] = value
767
          if constants.BE_MINMEM not in self.op.beparams:
768
            self.op.beparams[constants.BE_MINMEM] = value
769
    else:
770
      # try to read the parameters old style, from the main section
771
      for name in constants.BES_PARAMETERS:
772
        if (name not in self.op.beparams and
773
            einfo.has_option(constants.INISECT_INS, name)):
774
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
775

    
776
    if einfo.has_section(constants.INISECT_OSP):
777
      # use the parameters, without overriding
778
      for name, value in einfo.items(constants.INISECT_OSP):
779
        if name not in self.op.osparams:
780
          self.op.osparams[name] = value
781

    
782
  def _RevertToDefaults(self, cluster):
783
    """Revert the instance parameters to the default values.
784

785
    """
786
    # hvparams
787
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
788
    for name in self.op.hvparams.keys():
789
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
790
        del self.op.hvparams[name]
791
    # beparams
792
    be_defs = cluster.SimpleFillBE({})
793
    for name in self.op.beparams.keys():
794
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
795
        del self.op.beparams[name]
796
    # nic params
797
    nic_defs = cluster.SimpleFillNIC({})
798
    for nic in self.op.nics:
799
      for name in constants.NICS_PARAMETERS:
800
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
801
          del nic[name]
802
    # osparams
803
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
804
    for name in self.op.osparams.keys():
805
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
806
        del self.op.osparams[name]
807

    
808
  def _CalculateFileStorageDir(self):
809
    """Calculate final instance file storage dir.
810

811
    """
812
    # file storage dir calculation/check
813
    self.instance_file_storage_dir = None
814
    if self.op.disk_template in constants.DTS_FILEBASED:
815
      # build the full file storage dir path
816
      joinargs = []
817

    
818
      if self.op.disk_template == constants.DT_SHARED_FILE:
819
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
820
      else:
821
        get_fsd_fn = self.cfg.GetFileStorageDir
822

    
823
      cfg_storagedir = get_fsd_fn()
824
      if not cfg_storagedir:
825
        raise errors.OpPrereqError("Cluster file storage dir not defined",
826
                                   errors.ECODE_STATE)
827
      joinargs.append(cfg_storagedir)
828

    
829
      if self.op.file_storage_dir is not None:
830
        joinargs.append(self.op.file_storage_dir)
831

    
832
      joinargs.append(self.op.instance_name)
833

    
834
      # pylint: disable=W0142
835
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
836

    
837
  def CheckPrereq(self): # pylint: disable=R0914
838
    """Check prerequisites.
839

840
    """
841
    self._CalculateFileStorageDir()
842

    
843
    if self.op.mode == constants.INSTANCE_IMPORT:
844
      export_info = self._ReadExportInfo()
845
      self._ReadExportParams(export_info)
846
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
847
    else:
848
      self._old_instance_name = None
849

    
850
    if (not self.cfg.GetVGName() and
851
        self.op.disk_template not in constants.DTS_NOT_LVM):
852
      raise errors.OpPrereqError("Cluster does not support lvm-based"
853
                                 " instances", errors.ECODE_STATE)
854

    
855
    if (self.op.hypervisor is None or
856
        self.op.hypervisor == constants.VALUE_AUTO):
857
      self.op.hypervisor = self.cfg.GetHypervisorType()
858

    
859
    cluster = self.cfg.GetClusterInfo()
860
    enabled_hvs = cluster.enabled_hypervisors
861
    if self.op.hypervisor not in enabled_hvs:
862
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
863
                                 " cluster (%s)" %
864
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
865
                                 errors.ECODE_STATE)
866

    
867
    # Check tag validity
868
    for tag in self.op.tags:
869
      objects.TaggableObject.ValidateTag(tag)
870

    
871
    # check hypervisor parameter syntax (locally)
872
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
873
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
874
                                      self.op.hvparams)
875
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
876
    hv_type.CheckParameterSyntax(filled_hvp)
877
    self.hv_full = filled_hvp
878
    # check that we don't specify global parameters on an instance
879
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
880
                         "instance", "cluster")
881

    
882
    # fill and remember the beparams dict
883
    self.be_full = _ComputeFullBeParams(self.op, cluster)
884

    
885
    # build os parameters
886
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
887

    
888
    # now that hvp/bep are in final format, let's reset to defaults,
889
    # if told to do so
890
    if self.op.identify_defaults:
891
      self._RevertToDefaults(cluster)
892

    
893
    # NIC buildup
894
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
895
                             self.proc.GetECId())
896

    
897
    # disk checks/pre-build
898
    default_vg = self.cfg.GetVGName()
899
    self.disks = ComputeDisks(self.op, default_vg)
900

    
901
    if self.op.mode == constants.INSTANCE_IMPORT:
902
      disk_images = []
903
      for idx in range(len(self.disks)):
904
        option = "disk%d_dump" % idx
905
        if export_info.has_option(constants.INISECT_INS, option):
906
          # FIXME: are the old os-es, disk sizes, etc. useful?
907
          export_name = export_info.get(constants.INISECT_INS, option)
908
          image = utils.PathJoin(self.op.src_path, export_name)
909
          disk_images.append(image)
910
        else:
911
          disk_images.append(False)
912

    
913
      self.src_images = disk_images
914

    
915
      if self.op.instance_name == self._old_instance_name:
916
        for idx, nic in enumerate(self.nics):
917
          if nic.mac == constants.VALUE_AUTO:
918
            nic_mac_ini = "nic%d_mac" % idx
919
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
920

    
921
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
922

    
923
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
924
    if self.op.ip_check:
925
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
926
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
927
                                   (self.check_ip, self.op.instance_name),
928
                                   errors.ECODE_NOTUNIQUE)
929

    
930
    #### mac address generation
931
    # By generating here the mac address both the allocator and the hooks get
932
    # the real final mac address rather than the 'auto' or 'generate' value.
933
    # There is a race condition between the generation and the instance object
934
    # creation, which means that we know the mac is valid now, but we're not
935
    # sure it will be when we actually add the instance. If things go bad
936
    # adding the instance will abort because of a duplicate mac, and the
937
    # creation job will fail.
938
    for nic in self.nics:
939
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
940
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
941

    
942
    #### allocator run
943

    
944
    if self.op.iallocator is not None:
945
      self._RunAllocator()
946

    
947
    # Release all unneeded node locks
948
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
949
                               self.op.src_node_uuid])
950
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
951
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
952
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
953

    
954
    assert (self.owned_locks(locking.LEVEL_NODE) ==
955
            self.owned_locks(locking.LEVEL_NODE_RES)), \
956
      "Node locks differ from node resource locks"
957

    
958
    #### node related checks
959

    
960
    # check primary node
961
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
962
    assert self.pnode is not None, \
963
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
964
    if pnode.offline:
965
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
966
                                 pnode.name, errors.ECODE_STATE)
967
    if pnode.drained:
968
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
969
                                 pnode.name, errors.ECODE_STATE)
970
    if not pnode.vm_capable:
971
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
972
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
973

    
974
    self.secondaries = []
975

    
976
    # Fill in any IPs from IP pools. This must happen here, because we need to
977
    # know the nic's primary node, as specified by the iallocator
978
    for idx, nic in enumerate(self.nics):
979
      net_uuid = nic.network
980
      if net_uuid is not None:
981
        nobj = self.cfg.GetNetwork(net_uuid)
982
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
983
        if netparams is None:
984
          raise errors.OpPrereqError("No netparams found for network"
985
                                     " %s. Propably not connected to"
986
                                     " node's %s nodegroup" %
987
                                     (nobj.name, self.pnode.name),
988
                                     errors.ECODE_INVAL)
989
        self.LogInfo("NIC/%d inherits netparams %s" %
990
                     (idx, netparams.values()))
991
        nic.nicparams = dict(netparams)
992
        if nic.ip is not None:
993
          if nic.ip.lower() == constants.NIC_IP_POOL:
994
            try:
995
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
996
            except errors.ReservationError:
997
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
998
                                         " from the address pool" % idx,
999
                                         errors.ECODE_STATE)
1000
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1001
          else:
1002
            try:
1003
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
1004
            except errors.ReservationError:
1005
              raise errors.OpPrereqError("IP address %s already in use"
1006
                                         " or does not belong to network %s" %
1007
                                         (nic.ip, nobj.name),
1008
                                         errors.ECODE_NOTUNIQUE)
1009

    
1010
      # net is None, ip None or given
1011
      elif self.op.conflicts_check:
1012
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1013

    
1014
    # mirror node verification
1015
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1016
      if self.op.snode_uuid == pnode.uuid:
1017
        raise errors.OpPrereqError("The secondary node cannot be the"
1018
                                   " primary node", errors.ECODE_INVAL)
1019
      CheckNodeOnline(self, self.op.snode_uuid)
1020
      CheckNodeNotDrained(self, self.op.snode_uuid)
1021
      CheckNodeVmCapable(self, self.op.snode_uuid)
1022
      self.secondaries.append(self.op.snode_uuid)
1023

    
1024
      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1025
      if pnode.group != snode.group:
1026
        self.LogWarning("The primary and secondary nodes are in two"
1027
                        " different node groups; the disk parameters"
1028
                        " from the first disk's node group will be"
1029
                        " used")
1030

    
1031
    nodes = [pnode]
1032
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1033
      nodes.append(snode)
1034
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1035
    excl_stor = compat.any(map(has_es, nodes))
1036
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1037
      raise errors.OpPrereqError("Disk template %s not supported with"
1038
                                 " exclusive storage" % self.op.disk_template,
1039
                                 errors.ECODE_STATE)
1040
    for disk in self.disks:
1041
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1042

    
1043
    node_uuids = [pnode.uuid] + self.secondaries
1044

    
1045
    if not self.adopt_disks:
1046
      if self.op.disk_template == constants.DT_RBD:
1047
        # _CheckRADOSFreeSpace() is just a placeholder.
1048
        # Any function that checks prerequisites can be placed here.
1049
        # Check if there is enough space on the RADOS cluster.
1050
        CheckRADOSFreeSpace()
1051
      elif self.op.disk_template == constants.DT_EXT:
1052
        # FIXME: Function that checks prereqs if needed
1053
        pass
1054
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
1055
        # Check lv size requirements, if not adopting
1056
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1057
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1058
      else:
1059
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1060
        pass
1061

    
1062
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1063
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1064
                                disk[constants.IDISK_ADOPT])
1065
                     for disk in self.disks])
1066
      if len(all_lvs) != len(self.disks):
1067
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1068
                                   errors.ECODE_INVAL)
1069
      for lv_name in all_lvs:
1070
        try:
1071
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1072
          # to ReserveLV uses the same syntax
1073
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1074
        except errors.ReservationError:
1075
          raise errors.OpPrereqError("LV named %s used by another instance" %
1076
                                     lv_name, errors.ECODE_NOTUNIQUE)
1077

    
1078
      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1079
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1080

    
1081
      node_lvs = self.rpc.call_lv_list([pnode.uuid],
1082
                                       vg_names.payload.keys())[pnode.uuid]
1083
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1084
      node_lvs = node_lvs.payload
1085

    
1086
      delta = all_lvs.difference(node_lvs.keys())
1087
      if delta:
1088
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1089
                                   utils.CommaJoin(delta),
1090
                                   errors.ECODE_INVAL)
1091
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1092
      if online_lvs:
1093
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1094
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1095
                                   errors.ECODE_STATE)
1096
      # update the size of disk based on what is found
1097
      for dsk in self.disks:
1098
        dsk[constants.IDISK_SIZE] = \
1099
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1100
                                        dsk[constants.IDISK_ADOPT])][0]))
1101

    
1102
    elif self.op.disk_template == constants.DT_BLOCK:
1103
      # Normalize and de-duplicate device paths
1104
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1105
                       for disk in self.disks])
1106
      if len(all_disks) != len(self.disks):
1107
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1108
                                   errors.ECODE_INVAL)
1109
      baddisks = [d for d in all_disks
1110
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1111
      if baddisks:
1112
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1113
                                   " cannot be adopted" %
1114
                                   (utils.CommaJoin(baddisks),
1115
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1116
                                   errors.ECODE_INVAL)
1117

    
1118
      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1119
                                            list(all_disks))[pnode.uuid]
1120
      node_disks.Raise("Cannot get block device information from node %s" %
1121
                       pnode.name)
1122
      node_disks = node_disks.payload
1123
      delta = all_disks.difference(node_disks.keys())
1124
      if delta:
1125
        raise errors.OpPrereqError("Missing block device(s): %s" %
1126
                                   utils.CommaJoin(delta),
1127
                                   errors.ECODE_INVAL)
1128
      for dsk in self.disks:
1129
        dsk[constants.IDISK_SIZE] = \
1130
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1131

    
1132
    # Verify instance specs
1133
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1134
    ispec = {
1135
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1136
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1137
      constants.ISPEC_DISK_COUNT: len(self.disks),
1138
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1139
                                  for disk in self.disks],
1140
      constants.ISPEC_NIC_COUNT: len(self.nics),
1141
      constants.ISPEC_SPINDLE_USE: spindle_use,
1142
      }
1143

    
1144
    group_info = self.cfg.GetNodeGroup(pnode.group)
1145
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1146
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1147
                                               self.op.disk_template)
1148
    if not self.op.ignore_ipolicy and res:
1149
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1150
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1151
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1152

    
1153
    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1154

    
1155
    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1156
    # check OS parameters (remotely)
1157
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1158

    
1159
    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1160

    
1161
    #TODO: _CheckExtParams (remotely)
1162
    # Check parameters for extstorage
1163

    
1164
    # memory check on primary node
1165
    #TODO(dynmem): use MINMEM for checking
1166
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 self.op.instance_name, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name, os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, self.pnode.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        self.cfg.SetDiskID(disk, node_uuid)
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.name)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(self.instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(
                 target_node.uuid, (disk, self.instance), self.instance.name,
                 True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
                                                                self.instance),
                                             target_node.name, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator and has_nodes:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    cluster = self.cfg.GetClusterInfo()
    default_vg = self.cfg.GetVGName()
    ec_id = self.proc.GetECId()

    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = self.cfg.GetNodeNames(
                         list(self.owned_locks(locking.LEVEL_NODE)))
    else:
      node_whitelist = None

    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                         _ComputeNics(op, cluster, None,
                                                      self.cfg, ec_id),
                                         _ComputeFullBeParams(op, cluster),
                                         node_whitelist)
             for op in self.op.instances]

    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    (allocatable, failed) = self.ia_result
    return {
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
        map(compat.fst, allocatable),
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
    (allocatable, failed) = self.ia_result

    jobs = []
    for (name, node_names) in allocatable:
      op = op2inst.pop(name)

      (op.pnode_uuid, op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, None, node_names[0])
      if len(node_names) > 1:
        (op.snode_uuid, op.snode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[1])

      jobs.append([op])

    missing = set(op2inst.keys()) - set(failed)
    assert not missing, \
      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
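
# Editorial illustration for _PrepareContainerMods (assumption, not part of
# the original module): with
#   mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
# _PrepareContainerMods(mods, None) would return
#   [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}, None)]
# i.e. each (op, index, params) tuple gains the value built by private_fn(),
# or None when no private_fn is given.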


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has less CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs, None)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
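
# Editorial illustration for GetItemFromContainer (assumption, not part of the
# original module): for a container of three disks,
# GetItemFromContainer("1", "disk", disks) resolves to (1, disks[1]);
# "-1" resolves to the last item; a non-numeric identifier is matched against
# each item's .uuid and .name attributes instead.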


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
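
# Editorial illustration for _ApplyContainerMods (assumption, not part of the
# original module):
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, "0", {})], None)
#   _ApplyContainerMods("disk", disks, chgdesc, mods, None, None, None)
# removes the first disk from the container and appends ("disk/0", "remove")
# to chgdesc.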


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result
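
  # Editorial illustration for _UpgradeDiskNicMods (assumption, not part of
  # the original module): the legacy 2-tuple form
  #   [(constants.DDM_ADD, {"size": 1024})]
  # is upgraded here to [(constants.DDM_ADD, -1, {"size": 1024})], while a
  # plain modification given as (index_or_name, params) becomes
  # (constants.DDM_MODIFY, index_or_name, params).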

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  @staticmethod
  def _VerifyDiskModification(op, params, excl_stor):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)

      try:
        size = int(size)
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
                                   errors.ECODE_INVAL)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

      CheckSpindlesExclusiveStorage(params, excl_stor, True)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)
      if len(params) > 2:
        raise errors.OpPrereqError("Disk modification doesn't support"
                                   " additional arbitrary parameters",
                                   errors.ECODE_INVAL)
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given, mode or link"
                                     " should not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.offline is not None or self.op.runtime_mem or
            self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock node group to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
2393
    """Build hooks env.
2394

2395
    This runs on the master, primary and secondaries.
2396

2397
    """
2398
    args = {}
2399
    if constants.BE_MINMEM in self.be_new:
2400
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2401
    if constants.BE_MAXMEM in self.be_new:
2402
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2403
    if constants.BE_VCPUS in self.be_new:
2404
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2405
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2406
    # information at all.
2407

    
2408
    if self._new_nics is not None:
2409
      nics = []
2410

    
2411
      for nic in self._new_nics:
2412
        n = copy.deepcopy(nic)
2413
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2414
        n.nicparams = nicparams
2415
        nics.append(NICToTuple(self, n))
2416

    
2417
      args["nics"] = nics
2418

    
2419
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2420
    if self.op.disk_template:
2421
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2422
    if self.op.runtime_mem:
2423
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2424

    
2425
    return env
2426

    
2427
  def BuildHooksNodes(self):
2428
    """Build hooks nodes.
2429

2430
    """
2431
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2432
    return (nl, nl)
2433

    
2434
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2435
                              old_params, cluster, pnode_uuid):
2436

    
2437
    update_params_dict = dict([(key, params[key])
2438
                               for key in constants.NICS_PARAMETERS
2439
                               if key in params])
2440

    
2441
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2442
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2443

    
2444
    new_net_uuid = None
2445
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2446
    if new_net_uuid_or_name:
2447
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2448
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2449

    
2450
    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

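    # Save the computed parameters on the private container; the apply phase
    # (_CreateNewNic/_ApplyNicMods) reads the filled values back from there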
    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs, False)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

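    # Thin wrappers adapting _PrepareNicModification to the callback
    # signatures expected by _ApplyContainerMods below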
    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.name, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.name, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

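  # Supported disk template conversions: maps a (current, requested) template
  # pair to the method implementing the conversion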
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
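  """Changes the node group of an instance.

  The target group is chosen by the instance allocator from the requested
  target groups (or from all other groups), and the actual move is carried
  out by the jobs returned to the caller.

  """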
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)