root / lib / cmdlib / instance.py @ ff34fb97


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
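
# Editor's note (illustrative sketch, not part of the original module): a
# value accepted by the type description above is either None or a list of
# two-element (description, data) pairs, e.g.
#   [("mode", constants.NIC_MODE_BRIDGED), ("link", "br0")]
# where the first element must be a non-empty string and the second may be
# any value.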


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
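
# Editor's sketch (illustrative only): the prefix requirement above means a
# request for "inst1" that resolves to "inst1.example.com" is accepted (and
# the resolved object is returned), while a resolution to an unrelated name
# such as "mail.example.com" raises OpPrereqError, guarding against
# accidental mismatches between the requested and the resolved hostname.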


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
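
# Editor's sketch (illustrative only, not executed): with cluster-level
# defaults of, say, {BE_MAXMEM: 1024, BE_VCPUS: 1} and an opcode passing
# {BE_VCPUS: constants.VALUE_AUTO, BE_MAXMEM: 2048}, the loop above replaces
# the "auto" vcpus value with the cluster default before the dict is
# upgraded, type-checked and merged via SimpleFillBE().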


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
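
# Editor's sketch (illustrative only): each entry of op.nics is a dict of
# INIC_* keys as handled above, for example
#   {constants.INIC_NETWORK: "net1",
#    constants.INIC_IP: constants.NIC_IP_POOL,
#    constants.INIC_MAC: constants.VALUE_GENERATE}
# Mode/link may not be combined with an explicit network, and pool-based IPs
# are only reserved later, in CheckPrereq, once the primary node (and hence
# the node group's network parameters) is known.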


def _CheckForConflictingIp(lu, ip, node):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
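
# Editor's note (illustrative, based on the usual Ganeti naming convention):
# OS variants are requested by suffixing the OS name, e.g.
# "debootstrap+default"; objects.OS.GetVariant() extracts the part after the
# "+". An OS without supported_variants must be given as a plain name,
# otherwise a variant is mandatory and must be one of supported_variants.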


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_LOOP

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
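
    # Editor's sketch (illustrative only, not executed): for an explicit node
    # assignment such as pnode="node1.example.com", snode="node2.example.com"
    # the method above leaves roughly
    #   needed_locks = {LEVEL_NODE: ["node1...", "node2..."],
    #                   LEVEL_NODE_RES: ["node1...", "node2..."]}
    # whereas iallocator-driven creation (and imports without a source node)
    # fall back to locking.ALL_SET at the node and node-allocation levels.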

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      self.op.snode = ial.result[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env
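
    # Editor's note: ADD_MODE (and, for imports, SRC_NODE/SRC_PATH/SRC_IMAGES)
    # are set directly above; the remaining per-instance hook variables come
    # from BuildInstanceHookEnv, which is fed the final beparams, hvparams,
    # NIC and disk tuples computed in CheckPrereq.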

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if (int(ei_version) != constants.EXPORT_VERSION):
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    If the opcode doesn't specify (i.e. override) some instance
    parameters, try to take them from the export information, if it
    declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value
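
    # Editor's sketch (illustrative only): the export data parsed by
    # _ReadExportInfo is an INI-style file; the options read above live in
    # the sections named by the INISECT_* constants and look roughly like
    #   [<INISECT_EXP>]  version=..., os=...
    #   [<INISECT_INS>]  name=..., disk_template=..., disk0_size=...,
    #                    nic0_mac=..., nic0_ip=..., tags=..., hypervisor=...
    #   [<INISECT_HYP>] / [<INISECT_BEP>] / [<INISECT_OSP>]  per-parameter
    #                    key=value pairs, merged in unless already given in
    #                    the opcode.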

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in CheckArguments)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      nodes = [pnode]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(snode)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        raise errors.OpPrereqError("Disk template %s not supported with"
                                   " exclusive storage" % self.op.disk_template,
                                   errors.ECODE_STATE)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      else:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise
1241

    
1242
    feedback_fn("adding instance %s to cluster config" % instance)
1243

    
1244
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1245

    
1246
    # Declare that we don't want to remove the instance lock anymore, as we've
1247
    # added the instance to the config
1248
    del self.remove_locks[locking.LEVEL_INSTANCE]
1249

    
1250
    if self.op.mode == constants.INSTANCE_IMPORT:
1251
      # Release unused nodes
1252
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1253
    else:
1254
      # Release all nodes
1255
      ReleaseLocks(self, locking.LEVEL_NODE)
1256

    
1257
    disk_abort = False
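    # Only newly created disks are wiped; adopted volumes already contain
    # data that must be preserved.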
1258
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1259
      feedback_fn("* wiping instance disks...")
1260
      try:
1261
        WipeDisks(self, iobj)
1262
      except errors.OpExecError, err:
1263
        logging.exception("Wiping disks failed")
1264
        self.LogWarning("Wiping instance disks failed (%s)", err)
1265
        disk_abort = True
1266

    
1267
    if disk_abort:
1268
      # Something is already wrong with the disks, don't do anything else
1269
      pass
1270
    elif self.op.wait_for_sync:
1271
      disk_abort = not WaitForSync(self, iobj)
1272
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1273
      # make sure the disks are not degraded (still sync-ing is ok)
1274
      feedback_fn("* checking mirrors status")
1275
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1276
    else:
1277
      disk_abort = False
1278

    
1279
    if disk_abort:
1280
      RemoveDisks(self, iobj)
1281
      self.cfg.RemoveInstance(iobj.name)
1282
      # Make sure the instance lock gets removed
1283
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1284
      raise errors.OpExecError("There are some degraded disks for"
1285
                               " this instance")
1286

    
1287
    # instance disks are now active
1288
    iobj.disks_active = True
1289

    
1290
    # Release all node resource locks
1291
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1292

    
1293
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1294
      # we need to set the disks ID to the primary node, since the
1295
      # preceding code might or might not have done it, depending on
1296
      # disk template and other options
1297
      for disk in iobj.disks:
1298
        self.cfg.SetDiskID(disk, pnode_name)
1299
      if self.op.mode == constants.INSTANCE_CREATE:
1300
        if not self.op.no_install:
1301
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1302
                        not self.op.wait_for_sync)
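          # Pausing the mirror sync keeps it from competing with the OS
          # installation I/O; it is resumed right after the OS scripts finish.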
1303
          if pause_sync:
1304
            feedback_fn("* pausing disk sync to install instance OS")
1305
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1306
                                                              (iobj.disks,
1307
                                                               iobj), True)
1308
            for idx, success in enumerate(result.payload):
1309
              if not success:
1310
                logging.warn("pause-sync of instance %s for disk %d failed",
1311
                             instance, idx)
1312

    
1313
          feedback_fn("* running the instance OS create scripts...")
1314
          # FIXME: pass debug option from opcode to backend
1315
          os_add_result = \
1316
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1317
                                          self.op.debug_level)
1318
          if pause_sync:
1319
            feedback_fn("* resuming disk sync")
1320
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1321
                                                              (iobj.disks,
1322
                                                               iobj), False)
1323
            for idx, success in enumerate(result.payload):
1324
              if not success:
1325
                logging.warn("resume-sync of instance %s for disk %d failed",
1326
                             instance, idx)
1327

    
1328
          os_add_result.Raise("Could not add os for instance %s"
1329
                              " on node %s" % (instance, pnode_name))
1330

    
1331
      else:
1332
        if self.op.mode == constants.INSTANCE_IMPORT:
1333
          feedback_fn("* running the instance OS import scripts...")
1334

    
1335
          transfers = []
1336

    
1337
          for idx, image in enumerate(self.src_images):
1338
            if not image:
1339
              continue
1340

    
1341
            # FIXME: pass debug option from opcode to backend
1342
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1343
                                               constants.IEIO_FILE, (image, ),
1344
                                               constants.IEIO_SCRIPT,
1345
                                               (iobj.disks[idx], idx),
1346
                                               None)
1347
            transfers.append(dt)
1348

    
1349
          import_result = \
1350
            masterd.instance.TransferInstanceData(self, feedback_fn,
1351
                                                  self.op.src_node, pnode_name,
1352
                                                  self.pnode.secondary_ip,
1353
                                                  iobj, transfers)
1354
          if not compat.all(import_result):
1355
            self.LogWarning("Some disks for instance %s on node %s were not"
1356
                            " imported successfully" % (instance, pnode_name))
1357

    
1358
          rename_from = self._old_instance_name
1359

    
1360
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1361
          feedback_fn("* preparing remote import...")
1362
          # The source cluster will stop the instance before attempting to make
1363
          # a connection. In some cases stopping an instance can take a long
1364
          # time, hence the shutdown timeout is added to the connection
1365
          # timeout.
1366
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1367
                             self.op.source_shutdown_timeout)
1368
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1369

    
1370
          assert iobj.primary_node == self.pnode.name
1371
          disk_results = \
1372
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1373
                                          self.source_x509_ca,
1374
                                          self._cds, timeouts)
1375
          if not compat.all(disk_results):
1376
            # TODO: Should the instance still be started, even if some disks
1377
            # failed to import (valid for local imports, too)?
1378
            self.LogWarning("Some disks for instance %s on node %s were not"
1379
                            " imported successfully" % (instance, pnode_name))
1380

    
1381
          rename_from = self.source_instance_name
1382

    
1383
        else:
1384
          # also checked in the prereq part
1385
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1386
                                       % self.op.mode)
1387

    
1388
        # Run rename script on newly imported instance
1389
        assert iobj.name == instance
1390
        feedback_fn("Running rename script for %s" % instance)
1391
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1392
                                                   rename_from,
1393
                                                   self.op.debug_level)
1394
        if result.fail_msg:
1395
          self.LogWarning("Failed to run rename script for %s on node"
1396
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
1397

    
1398
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1399

    
1400
    if self.op.start:
1401
      iobj.admin_state = constants.ADMINST_UP
1402
      self.cfg.Update(iobj, feedback_fn)
1403
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1404
      feedback_fn("* starting instance...")
1405
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1406
                                            False, self.op.reason)
1407
      result.Raise("Could not start instance")
1408

    
1409
    return list(iobj.all_nodes)
1410

    
1411

    
1412
class LUInstanceRename(LogicalUnit):
1413
  """Rename an instance.
1414

1415
  """
1416
  HPATH = "instance-rename"
1417
  HTYPE = constants.HTYPE_INSTANCE
1418

    
1419
  def CheckArguments(self):
1420
    """Check arguments.
1421

1422
    """
1423
    if self.op.ip_check and not self.op.name_check:
1424
      # TODO: make the ip check more flexible and not depend on the name check
1425
      raise errors.OpPrereqError("IP address check requires a name check",
1426
                                 errors.ECODE_INVAL)
1427

    
1428
  def BuildHooksEnv(self):
1429
    """Build hooks env.
1430

1431
    This runs on master, primary and secondary nodes of the instance.
1432

1433
    """
1434
    env = BuildInstanceHookEnvByObject(self, self.instance)
1435
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1436
    return env
1437

    
1438
  def BuildHooksNodes(self):
1439
    """Build hooks nodes.
1440

1441
    """
1442
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1443
    return (nl, nl)
1444

    
1445
  def CheckPrereq(self):
1446
    """Check prerequisites.
1447

1448
    This checks that the instance is in the cluster and is not running.
1449

1450
    """
1451
    self.op.instance_name = ExpandInstanceName(self.cfg,
1452
                                               self.op.instance_name)
1453
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1454
    assert instance is not None
1455
    CheckNodeOnline(self, instance.primary_node)
1456
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1457
                       msg="cannot rename")
1458
    self.instance = instance
1459

    
1460
    new_name = self.op.new_name
1461
    if self.op.name_check:
1462
      hostname = _CheckHostnameSane(self, new_name)
1463
      new_name = self.op.new_name = hostname.name
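      # A host answering on the noded port at the resolved IP means the
      # address is already in use.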
1464
      if (self.op.ip_check and
1465
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1466
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1467
                                   (hostname.ip, new_name),
1468
                                   errors.ECODE_NOTUNIQUE)
1469

    
1470
    instance_list = self.cfg.GetInstanceList()
1471
    if new_name in instance_list and new_name != instance.name:
1472
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1473
                                 new_name, errors.ECODE_EXISTS)
1474

    
1475
  def Exec(self, feedback_fn):
1476
    """Rename the instance.
1477

1478
    """
1479
    inst = self.instance
1480
    old_name = inst.name
1481

    
1482
    rename_file_storage = False
1483
    if (inst.disk_template in constants.DTS_FILEBASED and
1484
        self.op.new_name != inst.name):
1485
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1486
      rename_file_storage = True
1487

    
1488
    self.cfg.RenameInstance(inst.name, self.op.new_name)
1489
    # Change the instance lock. This is definitely safe while we hold the BGL.
1490
    # Otherwise the new lock would have to be added in acquired mode.
1491
    assert self.REQ_BGL
1492
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1493
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1494
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1495

    
1496
    # re-read the instance from the configuration after rename
1497
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
1498

    
1499
    if rename_file_storage:
1500
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1501
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1502
                                                     old_file_storage_dir,
1503
                                                     new_file_storage_dir)
1504
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1505
                   " (but the instance has been renamed in Ganeti)" %
1506
                   (inst.primary_node, old_file_storage_dir,
1507
                    new_file_storage_dir))
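    # The disks must be assembled so that the metadata update and the OS
    # rename script below can access them.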
1508

    
1509
    StartInstanceDisks(self, inst, None)
1510
    # update info on disks
1511
    info = GetInstanceInfoText(inst)
1512
    for (idx, disk) in enumerate(inst.disks):
1513
      for node in inst.all_nodes:
1514
        self.cfg.SetDiskID(disk, node)
1515
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
1516
        if result.fail_msg:
1517
          self.LogWarning("Error setting info on node %s for disk %s: %s",
1518
                          node, idx, result.fail_msg)
1519
    try:
1520
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1521
                                                 old_name, self.op.debug_level)
1522
      msg = result.fail_msg
1523
      if msg:
1524
        msg = ("Could not run OS rename script for instance %s on node %s"
1525
               " (but the instance has been renamed in Ganeti): %s" %
1526
               (inst.name, inst.primary_node, msg))
1527
        self.LogWarning(msg)
1528
    finally:
1529
      ShutdownInstanceDisks(self, inst)
1530

    
1531
    return inst.name
1532

    
1533

    
1534
class LUInstanceRemove(LogicalUnit):
1535
  """Remove an instance.
1536

1537
  """
1538
  HPATH = "instance-remove"
1539
  HTYPE = constants.HTYPE_INSTANCE
1540
  REQ_BGL = False
1541

    
1542
  def ExpandNames(self):
1543
    self._ExpandAndLockInstance()
1544
    self.needed_locks[locking.LEVEL_NODE] = []
1545
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1546
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1547

    
1548
  def DeclareLocks(self, level):
1549
    if level == locking.LEVEL_NODE:
1550
      self._LockInstancesNodes()
1551
    elif level == locking.LEVEL_NODE_RES:
1552
      # Copy node locks
1553
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1554
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1555

    
1556
  def BuildHooksEnv(self):
1557
    """Build hooks env.
1558

1559
    This runs on master, primary and secondary nodes of the instance.
1560

1561
    """
1562
    env = BuildInstanceHookEnvByObject(self, self.instance)
1563
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1564
    return env
1565

    
1566
  def BuildHooksNodes(self):
1567
    """Build hooks nodes.
1568

1569
    """
1570
    nl = [self.cfg.GetMasterNode()]
1571
    nl_post = list(self.instance.all_nodes) + nl
1572
    return (nl, nl_post)
1573

    
1574
  def CheckPrereq(self):
1575
    """Check prerequisites.
1576

1577
    This checks that the instance is in the cluster.
1578

1579
    """
1580
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1581
    assert self.instance is not None, \
1582
      "Cannot retrieve locked instance %s" % self.op.instance_name
1583

    
1584
  def Exec(self, feedback_fn):
1585
    """Remove the instance.
1586

1587
    """
1588
    instance = self.instance
1589
    logging.info("Shutting down instance %s on node %s",
1590
                 instance.name, instance.primary_node)
1591

    
1592
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1593
                                             self.op.shutdown_timeout,
1594
                                             self.op.reason)
1595
    msg = result.fail_msg
1596
    if msg:
1597
      if self.op.ignore_failures:
1598
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
1599
      else:
1600
        raise errors.OpExecError("Could not shutdown instance %s on"
1601
                                 " node %s: %s" %
1602
                                 (instance.name, instance.primary_node, msg))
1603

    
1604
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1605
            self.owned_locks(locking.LEVEL_NODE_RES))
1606
    assert not (set(instance.all_nodes) -
1607
                self.owned_locks(locking.LEVEL_NODE)), \
1608
      "Not owning correct locks"
1609

    
1610
    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1611

    
1612

    
1613
class LUInstanceMove(LogicalUnit):
1614
  """Move an instance by data-copying.
1615

1616
  """
1617
  HPATH = "instance-move"
1618
  HTYPE = constants.HTYPE_INSTANCE
1619
  REQ_BGL = False
1620

    
1621
  def ExpandNames(self):
1622
    self._ExpandAndLockInstance()
1623
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
1624
    self.op.target_node = target_node
1625
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
1626
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1627
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1628

    
1629
  def DeclareLocks(self, level):
1630
    if level == locking.LEVEL_NODE:
1631
      self._LockInstancesNodes(primary_only=True)
1632
    elif level == locking.LEVEL_NODE_RES:
1633
      # Copy node locks
1634
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1635
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1636

    
1637
  def BuildHooksEnv(self):
1638
    """Build hooks env.
1639

1640
    This runs on master, primary and secondary nodes of the instance.
1641

1642
    """
1643
    env = {
1644
      "TARGET_NODE": self.op.target_node,
1645
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1646
      }
1647
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1648
    return env
1649

    
1650
  def BuildHooksNodes(self):
1651
    """Build hooks nodes.
1652

1653
    """
1654
    nl = [
1655
      self.cfg.GetMasterNode(),
1656
      self.instance.primary_node,
1657
      self.op.target_node,
1658
      ]
1659
    return (nl, nl)
1660

    
1661
  def CheckPrereq(self):
1662
    """Check prerequisites.
1663

1664
    This checks that the instance is in the cluster.
1665

1666
    """
1667
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1668
    assert self.instance is not None, \
1669
      "Cannot retrieve locked instance %s" % self.op.instance_name
1670

    
1671
    if instance.disk_template not in constants.DTS_COPYABLE:
1672
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1673
                                 instance.disk_template, errors.ECODE_STATE)
1674

    
1675
    node = self.cfg.GetNodeInfo(self.op.target_node)
1676
    assert node is not None, \
1677
      "Cannot retrieve locked node %s" % self.op.target_node
1678

    
1679
    self.target_node = target_node = node.name
1680

    
1681
    if target_node == instance.primary_node:
1682
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1683
                                 (instance.name, target_node),
1684
                                 errors.ECODE_STATE)
1685

    
1686
    bep = self.cfg.GetClusterInfo().FillBE(instance)
1687

    
1688
    for idx, dsk in enumerate(instance.disks):
1689
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1690
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1691
                                   " cannot copy" % idx, errors.ECODE_STATE)
1692

    
1693
    CheckNodeOnline(self, target_node)
1694
    CheckNodeNotDrained(self, target_node)
1695
    CheckNodeVmCapable(self, target_node)
1696
    cluster = self.cfg.GetClusterInfo()
1697
    group_info = self.cfg.GetNodeGroup(node.group)
1698
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1699
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1700
                           ignore=self.op.ignore_ipolicy)
1701

    
1702
    if instance.admin_state == constants.ADMINST_UP:
1703
      # check memory requirements on the target node
1704
      CheckNodeFreeMemory(self, target_node,
1705
                          "failing over instance %s" %
1706
                          instance.name, bep[constants.BE_MAXMEM],
1707
                          instance.hypervisor)
1708
    else:
1709
      self.LogInfo("Not checking memory on the secondary node as"
1710
                   " instance will not be started")
1711

    
1712
    # check bridge existence
1713
    CheckInstanceBridgesExist(self, instance, node=target_node)
1714

    
1715
  def Exec(self, feedback_fn):
1716
    """Move an instance.
1717

1718
    The move is done by shutting it down on its present node, copying
1719
    the data over (slow) and starting it on the new node.
1720

1721
    """
1722
    instance = self.instance
1723

    
1724
    source_node = instance.primary_node
1725
    target_node = self.target_node
1726

    
1727
    self.LogInfo("Shutting down instance %s on source node %s",
1728
                 instance.name, source_node)
1729

    
1730
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1731
            self.owned_locks(locking.LEVEL_NODE_RES))
1732

    
1733
    result = self.rpc.call_instance_shutdown(source_node, instance,
1734
                                             self.op.shutdown_timeout,
1735
                                             self.op.reason)
1736
    msg = result.fail_msg
1737
    if msg:
1738
      if self.op.ignore_consistency:
1739
        self.LogWarning("Could not shutdown instance %s on node %s."
1740
                        " Proceeding anyway. Please make sure node"
1741
                        " %s is down. Error details: %s",
1742
                        instance.name, source_node, source_node, msg)
1743
      else:
1744
        raise errors.OpExecError("Could not shutdown instance %s on"
1745
                                 " node %s: %s" %
1746
                                 (instance.name, source_node, msg))
1747

    
1748
    # create the target disks
1749
    try:
1750
      CreateDisks(self, instance, target_node=target_node)
1751
    except errors.OpExecError:
1752
      self.LogWarning("Device creation failed")
1753
      self.cfg.ReleaseDRBDMinors(instance.name)
1754
      raise
1755

    
1756
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1757

    
1758
    errs = []
1759
    # activate, get path, copy the data over
1760
    for idx, disk in enumerate(instance.disks):
1761
      self.LogInfo("Copying data for disk %d", idx)
1762
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1763
                                               instance.name, True, idx)
1764
      if result.fail_msg:
1765
        self.LogWarning("Can't assemble newly created disk %d: %s",
1766
                        idx, result.fail_msg)
1767
        errs.append(result.fail_msg)
1768
        break
1769
      dev_path = result.payload
1770
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1771
                                             target_node, dev_path,
1772
                                             cluster_name)
1773
      if result.fail_msg:
1774
        self.LogWarning("Can't copy data over for disk %d: %s",
1775
                        idx, result.fail_msg)
1776
        errs.append(result.fail_msg)
1777
        break
1778

    
1779
    if errs:
1780
      self.LogWarning("Some disks failed to copy, aborting")
1781
      try:
1782
        RemoveDisks(self, instance, target_node=target_node)
1783
      finally:
1784
        self.cfg.ReleaseDRBDMinors(instance.name)
1785
        raise errors.OpExecError("Errors during disk copy: %s" %
1786
                                 (",".join(errs),))
1787

    
1788
    instance.primary_node = target_node
1789
    self.cfg.Update(instance, feedback_fn)
1790

    
1791
    self.LogInfo("Removing the disks on the original node")
1792
    RemoveDisks(self, instance, target_node=source_node)
1793

    
1794
    # Only start the instance if it's marked as up
1795
    if instance.admin_state == constants.ADMINST_UP:
1796
      self.LogInfo("Starting instance %s on node %s",
1797
                   instance.name, target_node)
1798

    
1799
      disks_ok, _ = AssembleInstanceDisks(self, instance,
1800
                                          ignore_secondaries=True)
1801
      if not disks_ok:
1802
        ShutdownInstanceDisks(self, instance)
1803
        raise errors.OpExecError("Can't activate the instance's disks")
1804

    
1805
      result = self.rpc.call_instance_start(target_node,
1806
                                            (instance, None, None), False,
1807
                                            self.op.reason)
1808
      msg = result.fail_msg
1809
      if msg:
1810
        ShutdownInstanceDisks(self, instance)
1811
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1812
                                 (instance.name, target_node, msg))
1813

    
1814

    
1815
class LUInstanceMultiAlloc(NoHooksLU):
1816
  """Allocates multiple instances at the same time.
1817

1818
  """
1819
  REQ_BGL = False
1820

    
1821
  def CheckArguments(self):
1822
    """Check arguments.
1823

1824
    """
1825
    nodes = []
1826
    for inst in self.op.instances:
1827
      if inst.iallocator is not None:
1828
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1829
                                   " instance objects", errors.ECODE_INVAL)
1830
      nodes.append(bool(inst.pnode))
1831
      if inst.disk_template in constants.DTS_INT_MIRROR:
1832
        nodes.append(bool(inst.snode))
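    # Either every instance specifies its nodes or none does; the XOR below
    # rejects the mixed case.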
1833

    
1834
    has_nodes = compat.any(nodes)
1835
    if compat.all(nodes) ^ has_nodes:
1836
      raise errors.OpPrereqError("There are instance objects providing"
1837
                                 " pnode/snode while others do not",
1838
                                 errors.ECODE_INVAL)
1839

    
1840
    if not has_nodes and self.op.iallocator is None:
1841
      default_iallocator = self.cfg.GetDefaultIAllocator()
1842
      if default_iallocator:
1843
        self.op.iallocator = default_iallocator
1844
      else:
1845
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1846
                                   " given and no cluster-wide default"
1847
                                   " iallocator found; please specify either"
1848
                                   " an iallocator or nodes on the instances"
1849
                                   " or set a cluster-wide default iallocator",
1850
                                   errors.ECODE_INVAL)
1851

    
1852
    _CheckOpportunisticLocking(self.op)
1853

    
1854
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1855
    if dups:
1856
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1857
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1858

    
1859
  def ExpandNames(self):
1860
    """Calculate the locks.
1861

1862
    """
1863
    self.share_locks = ShareAll()
1864
    self.needed_locks = {
1865
      # iallocator will select nodes and even if no iallocator is used,
1866
      # collisions with LUInstanceCreate should be avoided
1867
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1868
      }
1869

    
1870
    if self.op.iallocator:
1871
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1872
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1873

    
1874
      if self.op.opportunistic_locking:
1875
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1876
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1877
    else:
1878
      nodeslist = []
1879
      for inst in self.op.instances:
1880
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1881
        nodeslist.append(inst.pnode)
1882
        if inst.snode is not None:
1883
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
1884
          nodeslist.append(inst.snode)
1885

    
1886
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1887
      # Lock resources of instance's primary and secondary nodes (copy to
1888
      # prevent accidental modification)
1889
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1890

    
1891
  def CheckPrereq(self):
1892
    """Check prerequisite.
1893

1894
    """
1895
    if self.op.iallocator:
1896
      cluster = self.cfg.GetClusterInfo()
1897
      default_vg = self.cfg.GetVGName()
1898
      ec_id = self.proc.GetECId()
1899

    
1900
      if self.op.opportunistic_locking:
1901
        # Only consider nodes for which a lock is held
1902
        node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1903
      else:
1904
        node_whitelist = None
1905

    
1906
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1907
                                           _ComputeNics(op, cluster, None,
1908
                                                        self.cfg, ec_id),
1909
                                           _ComputeFullBeParams(op, cluster),
1910
                                           node_whitelist)
1911
               for op in self.op.instances]
1912

    
1913
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1914
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1915

    
1916
      ial.Run(self.op.iallocator)
1917

    
1918
      if not ial.success:
1919
        raise errors.OpPrereqError("Can't compute nodes using"
1920
                                   " iallocator '%s': %s" %
1921
                                   (self.op.iallocator, ial.info),
1922
                                   errors.ECODE_NORES)
1923

    
1924
      self.ia_result = ial.result
1925

    
1926
    if self.op.dry_run:
1927
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1928
        constants.JOB_IDS_KEY: [],
1929
        })
1930

    
1931
  def _ConstructPartialResult(self):
1932
    """Contructs the partial result.
1933

1934
    """
1935
    if self.op.iallocator:
1936
      (allocatable, failed_insts) = self.ia_result
1937
      allocatable_insts = map(compat.fst, allocatable)
1938
    else:
1939
      allocatable_insts = [op.instance_name for op in self.op.instances]
1940
      failed_insts = []
1941

    
1942
    return {
1943
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
1944
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
1945
      }
1946

    
1947
  def Exec(self, feedback_fn):
1948
    """Executes the opcode.
1949

1950
    """
1951
    jobs = []
1952
    if self.op.iallocator:
1953
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
1954
      (allocatable, failed) = self.ia_result
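      # ia_result is a pair: (name, nodes) tuples for the instances the
      # allocator could place, and the names of those it could not.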
1955

    
1956
      for (name, nodes) in allocatable:
1957
        op = op2inst.pop(name)
1958

    
1959
        if len(nodes) > 1:
1960
          (op.pnode, op.snode) = nodes
1961
        else:
1962
          (op.pnode,) = nodes
1963

    
1964
        jobs.append([op])
1965

    
1966
      missing = set(op2inst.keys()) - set(failed)
1967
      assert not missing, \
1968
        "Iallocator did return incomplete result: %s" % \
1969
        utils.CommaJoin(missing)
1970
    else:
1971
      jobs.extend([op] for op in self.op.instances)
1972

    
1973
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
1974

    
1975

    
1976
class _InstNicModPrivate:
1977
  """Data structure for network interface modifications.
1978

1979
  Used by L{LUInstanceSetParams}.
1980

1981
  """
1982
  def __init__(self):
1983
    self.params = None
1984
    self.filled = None
1985

    
1986

    
1987
def _PrepareContainerMods(mods, private_fn):
1988
  """Prepares a list of container modifications by adding a private data field.
1989

1990
  @type mods: list of tuples; (operation, index, parameters)
1991
  @param mods: List of modifications
1992
  @type private_fn: callable or None
1993
  @param private_fn: Callable for constructing a private data field for a
1994
    modification
1995
  @rtype: list
1996

1997
  """
1998
  if private_fn is None:
1999
    fn = lambda: None
2000
  else:
2001
    fn = private_fn
2002

    
2003
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2004

    
2005

    
2006
def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
2007
  """Checks if nodes have enough physical CPUs
2008

2009
  This function checks if all given nodes have the needed number of
2010
  physical CPUs. In case any node has less CPUs or we cannot get the
2011
  information from the node, this function raises an OpPrereqError
2012
  exception.
2013

2014
  @type lu: C{LogicalUnit}
2015
  @param lu: a logical unit from which we get configuration data
2016
  @type nodenames: C{list}
2017
  @param nodenames: the list of node names to check
2018
  @type requested: C{int}
2019
  @param requested: the minimum acceptable number of physical CPUs
2020
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2021
      or we cannot check the node
2022

2023
  """
2024
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2025
  for node in nodenames:
2026
    info = nodeinfo[node]
2027
    info.Raise("Cannot get current information from node %s" % node,
2028
               prereq=True, ecode=errors.ECODE_ENVIRON)
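    # The payload is a 3-tuple whose last element carries one info dict per
    # requested hypervisor; only that part is used here.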
2029
    (_, _, (hv_info, )) = info.payload
2030
    num_cpus = hv_info.get("cpu_total", None)
2031
    if not isinstance(num_cpus, int):
2032
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2033
                                 " on node %s, result was '%s'" %
2034
                                 (node, num_cpus), errors.ECODE_ENVIRON)
2035
    if requested > num_cpus:
2036
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2037
                                 "required" % (node, num_cpus, requested),
2038
                                 errors.ECODE_NORES)
2039

    
2040

    
2041
def GetItemFromContainer(identifier, kind, container):
2042
  """Return the item refered by the identifier.
2043

2044
  @type identifier: string
2045
  @param identifier: Item index or name or UUID
2046
  @type kind: string
2047
  @param kind: One-word item description
2048
  @type container: list
2049
  @param container: Container to get the item from
2050

2051
  """
2052
  # Index
2053
  try:
2054
    idx = int(identifier)
2055
    if idx == -1:
2056
      # -1 refers to the last item
2057
      absidx = len(container) - 1
2058
    elif idx < 0:
2059
      raise IndexError("Not accepting negative indices other than -1")
2060
    elif idx > len(container):
2061
      raise IndexError("Got %s index %s, but there are only %s" %
2062
                       (kind, idx, len(container)))
2063
    else:
2064
      absidx = idx
2065
    return (absidx, container[idx])
2066
  except ValueError:
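    # The identifier is not a numeric index; fall through to the name/UUID
    # lookup below.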
2067
    pass
2068

    
2069
  for idx, item in enumerate(container):
2070
    if item.uuid == identifier or item.name == identifier:
2071
      return (idx, item)
2072

    
2073
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2074
                             (kind, identifier), errors.ECODE_NOENT)
2075

    
2076

    
2077
def _ApplyContainerMods(kind, container, chgdesc, mods,
2078
                        create_fn, modify_fn, remove_fn):
2079
  """Applies descriptions in C{mods} to C{container}.
2080

2081
  @type kind: string
2082
  @param kind: One-word item description
2083
  @type container: list
2084
  @param container: Container to modify
2085
  @type chgdesc: None or list
2086
  @param chgdesc: List of applied changes
2087
  @type mods: list
2088
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2089
  @type create_fn: callable
2090
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2091
    receives absolute item index, parameters and private data object as added
2092
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2093
    as list
2094
  @type modify_fn: callable
2095
  @param modify_fn: Callback for modifying an existing item
2096
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2097
    and private data object as added by L{_PrepareContainerMods}, returns
2098
    changes as list
2099
  @type remove_fn: callable
2100
  @param remove_fn: Callback on removing item; receives absolute item index,
2101
    item and private data object as added by L{_PrepareContainerMods}
2102

2103
  """
2104
  for (op, identifier, params, private) in mods:
2105
    changes = None
2106

    
2107
    if op == constants.DDM_ADD:
2108
      # Calculate where item will be added
2109
      # When adding an item, identifier can only be an index
2110
      try:
2111
        idx = int(identifier)
2112
      except ValueError:
2113
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2114
                                   " identifier for %s" % constants.DDM_ADD,
2115
                                   errors.ECODE_INVAL)
2116
      if idx == -1:
2117
        addidx = len(container)
2118
      else:
2119
        if idx < 0:
2120
          raise IndexError("Not accepting negative indices other than -1")
2121
        elif idx > len(container):
2122
          raise IndexError("Got %s index %s, but there are only %s" %
2123
                           (kind, idx, len(container)))
2124
        addidx = idx
2125

    
2126
      if create_fn is None:
2127
        item = params
2128
      else:
2129
        (item, changes) = create_fn(addidx, params, private)
2130

    
2131
      if idx == -1:
2132
        container.append(item)
2133
      else:
2134
        assert idx >= 0
2135
        assert idx <= len(container)
2136
        # list.insert does so before the specified index
2137
        container.insert(idx, item)
2138
    else:
2139
      # Retrieve existing item
2140
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2141

    
2142
      if op == constants.DDM_REMOVE:
2143
        assert not params
2144

    
2145
        if remove_fn is not None:
2146
          remove_fn(absidx, item, private)
2147

    
2148
        changes = [("%s/%s" % (kind, absidx), "remove")]
2149

    
2150
        assert container[absidx] == item
2151
        del container[absidx]
2152
      elif op == constants.DDM_MODIFY:
2153
        if modify_fn is not None:
2154
          changes = modify_fn(absidx, item, params, private)
2155
      else:
2156
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2157

    
2158
    assert _TApplyContModsCbChanges(changes)
2159

    
2160
    if not (chgdesc is None or changes is None):
2161
      chgdesc.extend(changes)
2162

    
2163

    
2164
def _UpdateIvNames(base_index, disks):
2165
  """Updates the C{iv_name} attribute of disks.
2166

2167
  @type disks: list of L{objects.Disk}
2168

2169
  """
2170
  for (idx, disk) in enumerate(disks):
2171
    disk.iv_name = "disk/%s" % (base_index + idx, )
2172

    
2173

    
2174
class LUInstanceSetParams(LogicalUnit):
2175
  """Modifies an instances's parameters.
2176

2177
  """
2178
  HPATH = "instance-modify"
2179
  HTYPE = constants.HTYPE_INSTANCE
2180
  REQ_BGL = False
2181

    
2182
  @staticmethod
2183
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2184
    assert ht.TList(mods)
2185
    assert not mods or len(mods[0]) in (2, 3)
2186

    
2187
    if mods and len(mods[0]) == 2:
2188
      result = []
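      # Old-style modifications are (op, params) pairs; upgrade them to the
      # (op, index, params) format, using index -1 for add/remove and
      # treating any other op value as the identifier of the item to modify.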
2189

    
2190
      addremove = 0
2191
      for op, params in mods:
2192
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2193
          result.append((op, -1, params))
2194
          addremove += 1
2195

    
2196
          if addremove > 1:
2197
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2198
                                       " supported at a time" % kind,
2199
                                       errors.ECODE_INVAL)
2200
        else:
2201
          result.append((constants.DDM_MODIFY, op, params))
2202

    
2203
      assert verify_fn(result)
2204
    else:
2205
      result = mods
2206

    
2207
    return result
2208

    
2209
  @staticmethod
2210
  def _CheckMods(kind, mods, key_types, item_fn):
2211
    """Ensures requested disk/NIC modifications are valid.
2212

2213
    """
2214
    for (op, _, params) in mods:
2215
      assert ht.TDict(params)
2216

    
2217
      # If 'key_types' is an empty dict, we assume we have an
2218
      # 'ext' template and thus do not ForceDictType
2219
      if key_types:
2220
        utils.ForceDictType(params, key_types)
2221

    
2222
      if op == constants.DDM_REMOVE:
2223
        if params:
2224
          raise errors.OpPrereqError("No settings should be passed when"
2225
                                     " removing a %s" % kind,
2226
                                     errors.ECODE_INVAL)
2227
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2228
        item_fn(op, params)
2229
      else:
2230
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2231

    
2232
  @staticmethod
2233
  def _VerifyDiskModification(op, params):
2234
    """Verifies a disk modification.
2235

2236
    """
2237
    if op == constants.DDM_ADD:
2238
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2239
      if mode not in constants.DISK_ACCESS_SET:
2240
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2241
                                   errors.ECODE_INVAL)
2242

    
2243
      size = params.get(constants.IDISK_SIZE, None)
2244
      if size is None:
2245
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2246
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2247

    
2248
      try:
2249
        size = int(size)
2250
      except (TypeError, ValueError), err:
2251
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2252
                                   errors.ECODE_INVAL)
2253

    
2254
      params[constants.IDISK_SIZE] = size
2255
      name = params.get(constants.IDISK_NAME, None)
2256
      if name is not None and name.lower() == constants.VALUE_NONE:
2257
        params[constants.IDISK_NAME] = None
2258

    
2259
    elif op == constants.DDM_MODIFY:
2260
      if constants.IDISK_SIZE in params:
2261
        raise errors.OpPrereqError("Disk size change not possible, use"
2262
                                   " grow-disk", errors.ECODE_INVAL)
2263
      if len(params) > 2:
2264
        raise errors.OpPrereqError("Disk modification doesn't support"
2265
                                   " additional arbitrary parameters",
2266
                                   errors.ECODE_INVAL)
2267
      name = params.get(constants.IDISK_NAME, None)
2268
      if name is not None and name.lower() == constants.VALUE_NONE:
2269
        params[constants.IDISK_NAME] = None
2270

    
2271
  @staticmethod
2272
  def _VerifyNicModification(op, params):
2273
    """Verifies a network interface modification.
2274

2275
    """
2276
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2277
      ip = params.get(constants.INIC_IP, None)
2278
      name = params.get(constants.INIC_NAME, None)
2279
      req_net = params.get(constants.INIC_NETWORK, None)
2280
      link = params.get(constants.NIC_LINK, None)
2281
      mode = params.get(constants.NIC_MODE, None)
2282
      if name is not None and name.lower() == constants.VALUE_NONE:
2283
        params[constants.INIC_NAME] = None
2284
      if req_net is not None:
2285
        if req_net.lower() == constants.VALUE_NONE:
2286
          params[constants.INIC_NETWORK] = None
2287
          req_net = None
2288
        elif link is not None or mode is not None:
2289
          raise errors.OpPrereqError("If network is given"
2290
                                     " mode or link should not",
2291
                                     errors.ECODE_INVAL)
2292

    
2293
      if op == constants.DDM_ADD:
2294
        macaddr = params.get(constants.INIC_MAC, None)
2295
        if macaddr is None:
2296
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2297

    
2298
      if ip is not None:
2299
        if ip.lower() == constants.VALUE_NONE:
2300
          params[constants.INIC_IP] = None
2301
        else:
2302
          if ip.lower() == constants.NIC_IP_POOL:
2303
            if op == constants.DDM_ADD and req_net is None:
2304
              raise errors.OpPrereqError("If ip=pool, parameter network"
2305
                                         " cannot be none",
2306
                                         errors.ECODE_INVAL)
2307
          else:
2308
            if not netutils.IPAddress.IsValid(ip):
2309
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2310
                                         errors.ECODE_INVAL)
2311

    
2312
      if constants.INIC_MAC in params:
2313
        macaddr = params[constants.INIC_MAC]
2314
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2315
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2316

    
2317
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2318
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2319
                                     " modifying an existing NIC",
2320
                                     errors.ECODE_INVAL)
2321

    
2322
  def CheckArguments(self):
2323
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2324
            self.op.hvparams or self.op.beparams or self.op.os_name or
2325
            self.op.osparams or self.op.offline is not None or
2326
            self.op.runtime_mem or self.op.pnode):
2327
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2328

    
2329
    if self.op.hvparams:
2330
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2331
                           "hypervisor", "instance", "cluster")
2332

    
2333
    self.op.disks = self._UpgradeDiskNicMods(
2334
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2335
    self.op.nics = self._UpgradeDiskNicMods(
2336
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2337

    
2338
    if self.op.disks and self.op.disk_template is not None:
2339
      raise errors.OpPrereqError("Disk template conversion and other disk"
2340
                                 " changes not supported at the same time",
2341
                                 errors.ECODE_INVAL)
2342

    
2343
    if (self.op.disk_template and
2344
        self.op.disk_template in constants.DTS_INT_MIRROR and
2345
        self.op.remote_node is None):
2346
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2347
                                 " one requires specifying a secondary node",
2348
                                 errors.ECODE_INVAL)
2349

    
2350
    # Check NIC modifications
2351
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2352
                    self._VerifyNicModification)
2353

    
2354
    if self.op.pnode:
2355
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2356

    
2357
  def ExpandNames(self):
2358
    self._ExpandAndLockInstance()
2359
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2360
    # Can't even acquire node locks in shared mode as upcoming changes in
2361
    # Ganeti 2.6 will start to modify the node object on disk conversion
2362
    self.needed_locks[locking.LEVEL_NODE] = []
2363
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2364
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2365
    # Lock the node group to look up the ipolicy
2366
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2367

    
2368
  def DeclareLocks(self, level):
2369
    if level == locking.LEVEL_NODEGROUP:
2370
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2371
      # Acquire locks for the instance's nodegroups optimistically. Needs
2372
      # to be verified in CheckPrereq
2373
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2374
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2375
    elif level == locking.LEVEL_NODE:
2376
      self._LockInstancesNodes()
2377
      if self.op.disk_template and self.op.remote_node:
2378
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2379
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2380
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2381
      # Copy node locks
2382
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2383
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2384

    
2385
  def BuildHooksEnv(self):
2386
    """Build hooks env.
2387

2388
    This runs on the master, primary and secondaries.
2389

2390
    """
2391
    args = {}
2392
    if constants.BE_MINMEM in self.be_new:
2393
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2394
    if constants.BE_MAXMEM in self.be_new:
2395
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2396
    if constants.BE_VCPUS in self.be_new:
2397
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2398
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2399
    # information at all.
2400

    
2401
    if self._new_nics is not None:
2402
      nics = []
2403

    
2404
      for nic in self._new_nics:
2405
        n = copy.deepcopy(nic)
2406
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2407
        n.nicparams = nicparams
2408
        nics.append(NICToTuple(self, n))
2409

    
2410
      args["nics"] = nics
2411

    
2412
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2413
    if self.op.disk_template:
2414
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2415
    if self.op.runtime_mem:
2416
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2417

    
2418
    return env
2419

    
2420
  def BuildHooksNodes(self):
2421
    """Build hooks nodes.
2422

2423
    """
2424
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2425
    return (nl, nl)
2426

    
2427
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2428
                              old_params, cluster, pnode):
2429

    
2430
    update_params_dict = dict([(key, params[key])
2431
                               for key in constants.NICS_PARAMETERS
2432
                               if key in params])
2433

    
2434
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2435
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2436

    
2437
    new_net_uuid = None
2438
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2439
    if new_net_uuid_or_name:
2440
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2441
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2442

    
2443
    if old_net_uuid:
2444
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2445

    
2446
    if new_net_uuid:
2447
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2448
      if not netparams:
2449
        raise errors.OpPrereqError("No netparams found for the network"
2450
                                   " %s, probably not connected" %
2451
                                   new_net_obj.name, errors.ECODE_INVAL)
2452
      new_params = dict(netparams)
2453
    else:
2454
      new_params = GetUpdatedParams(old_params, update_params_dict)
2455

    
2456
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2457

    
2458
    new_filled_params = cluster.SimpleFillNIC(new_params)
2459
    objects.NIC.CheckParameterSyntax(new_filled_params)
2460

    
2461
    new_mode = new_filled_params[constants.NIC_MODE]
2462
    if new_mode == constants.NIC_MODE_BRIDGED:
2463
      bridge = new_filled_params[constants.NIC_LINK]
2464
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2465
      if msg:
2466
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2467
        if self.op.force:
2468
          self.warn.append(msg)
2469
        else:
2470
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2471

    
2472
    elif new_mode == constants.NIC_MODE_ROUTED:
2473
      ip = params.get(constants.INIC_IP, old_ip)
2474
      if ip is None:
2475
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2476
                                   " on a routed NIC", errors.ECODE_INVAL)
2477

    
2478
    elif new_mode == constants.NIC_MODE_OVS:
2479
      # TODO: check OVS link
2480
      self.LogInfo("OVS links are currently not checked for correctness")
2481

    
2482
    if constants.INIC_MAC in params:
2483
      mac = params[constants.INIC_MAC]
2484
      if mac is None:
2485
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2486
                                   errors.ECODE_INVAL)
2487
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2488
        # otherwise generate the MAC address
2489
        params[constants.INIC_MAC] = \
2490
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2491
      else:
2492
        # or validate/reserve the current one
2493
        try:
2494
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2495
        except errors.ReservationError:
2496
          raise errors.OpPrereqError("MAC address '%s' already in use"
2497
                                     " in cluster" % mac,
2498
                                     errors.ECODE_NOTUNIQUE)
2499
    elif new_net_uuid != old_net_uuid:
2500

    
2501
      def get_net_prefix(net_uuid):
2502
        mac_prefix = None
2503
        if net_uuid:
2504
          nobj = self.cfg.GetNetwork(net_uuid)
2505
          mac_prefix = nobj.mac_prefix
2506

    
2507
        return mac_prefix
2508

    
2509
      new_prefix = get_net_prefix(new_net_uuid)
2510
      old_prefix = get_net_prefix(old_net_uuid)
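      # A different MAC prefix means the current address may be invalid in
      # the new network, so a fresh MAC is generated.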
2511
      if old_prefix != new_prefix:
2512
        params[constants.INIC_MAC] = \
2513
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2514

    
2515
    # if there is a change in (ip, network) tuple
2516
    new_ip = params.get(constants.INIC_IP, old_ip)
2517
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2518
      if new_ip:
2519
        # if IP is pool then require a network and generate one IP
2520
        if new_ip.lower() == constants.NIC_IP_POOL:
2521
          if new_net_uuid:
2522
            try:
2523
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2524
            except errors.ReservationError:
2525
              raise errors.OpPrereqError("Unable to get a free IP"
2526
                                         " from the address pool",
2527
                                         errors.ECODE_STATE)
2528
            self.LogInfo("Chose IP %s from network %s",
2529
                         new_ip,
2530
                         new_net_obj.name)
2531
            params[constants.INIC_IP] = new_ip
2532
          else:
2533
            raise errors.OpPrereqError("ip=pool, but no network found",
2534
                                       errors.ECODE_INVAL)
2535
        # Reserve the new IP in the new network, if any
2536
        elif new_net_uuid:
2537
          try:
2538
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2539
            self.LogInfo("Reserving IP %s in network %s",
2540
                         new_ip, new_net_obj.name)
2541
          except errors.ReservationError:
2542
            raise errors.OpPrereqError("IP %s not available in network %s" %
2543
                                       (new_ip, new_net_obj.name),
2544
                                       errors.ECODE_NOTUNIQUE)
2545
        # new network is None so check if new IP is a conflicting IP
2546
        elif self.op.conflicts_check:
2547
          _CheckForConflictingIp(self, new_ip, pnode)
2548

    
2549
      # release old IP if old network is not None
2550
      if old_ip and old_net_uuid:
2551
        try:
2552
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2553
        except errors.AddressPoolError:
2554
          logging.warning("Release IP %s not contained in network %s",
2555
                          old_ip, old_net_obj.name)
2556

    
2557
    # there are no changes in (ip, network) tuple and old network is not None
2558
    elif (old_net_uuid is not None and
2559
          (req_link is not None or req_mode is not None)):
2560
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2561
                                 " a NIC that is connected to a network",
2562
                                 errors.ECODE_INVAL)
2563

    
2564
    private.params = new_params
2565
    private.filled = new_filled_params
2566

    
2567
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    instance = self.instance
    pnode = instance.primary_node
    cluster = self.cluster
    if instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 instance.disk_template, errors.ECODE_INVAL)

    if (instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
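    # Conversions to an internally mirrored template need a usable secondary
    # node; check it and make sure it has enough free space for the disks.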
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node == pnode:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node)
      CheckNodeNotDrained(self, self.op.remote_node)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode = instance.primary_node

    self.warn = []

    if (self.op.pnode is not None and self.op.pnode != pnode and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
                                   errors.ECODE_STATE)

    assert pnode in self.owned_locks(locking.LEVEL_NODE)
    nodelist = list(instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode)
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    if instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {},
                      self._VerifyDiskModification)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      self._VerifyDiskModification)

    # Prepare disk/NIC modifications
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # Check the validity of the `provider' parameter
    if instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
                                              instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
    be_old = cluster.FillBE(instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
                                max_requested_cpu + 1, instance.hypervisor)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         [instance.hypervisor], False)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" % pnode)
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
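          # Memory still missing on the primary node: the requested maximum
          # minus what the instance currently uses minus what the node
          # reports as free.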
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" % node,
                     prereq=True, ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" % node,
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" % node,
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node)
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(self, instance.primary_node,
                            "ballooning memory for instance %s" %
                            instance.name, delta, instance.hypervisor)

    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

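    # Local callbacks for _ApplyContainerMods below; they validate each
    # requested NIC change and reserve/release MACs and IPs while still in
    # the prerequisite phase.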
    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    assert instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     instance.name, pnode, [snode],
                                     disk_info, None, None, 0, feedback_fn,
                                     self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
    info = GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode, instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
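    # The existing data LVs are renamed to the logical IDs that
    # GenerateDiskTemplate assigned to the data children of the new DRBD
    # disks, so they can be reused as the DRBD data devices.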
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
          f_create = node == pnode
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
                               excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance

    assert len(instance.secondary_nodes) == 1
    assert instance.disk_template == constants.DT_DRBD8

    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
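    # The plain disks are simply the data LVs (the first child) of the
    # existing DRBD disks; the meta LVs are removed further below.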
    new_disks = [d.children[0] for d in instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, instance.disks)
    self.cfg.Update(instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    instance = self.instance

    # add a new disk
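    # File-based templates place the new disk in the same directory as the
    # instance's first disk; other templates need no path information.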
    if instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
                           instance.primary_node, instance.secondary_nodes,
                           [params], file_path, file_driver, idx,
                           self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
      self.cfg.SetDiskID(disk, node)
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx, node, msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

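    # Node resource locks are acquired only when the disk template is being
    # changed; the exclusive-or below asserts exactly that.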
    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []
    instance = self.instance

    # New primary node
    if self.op.pnode:
      instance.primary_node = self.op.pnode

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
                                                     instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(instance.all_nodes)
        if self.op.remote_node:
          check_nodes.add(self.op.remote_node)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

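  # Supported disk template conversions, mapping (current, requested)
  # template pairs to the method implementing the conversion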
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_name
                        for group in lock_groups
                        for node_name in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

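    # Ask the instance allocator for a group-change solution; the node
    # evacuation plan it returns is turned into follow-up jobs that are
    # handed back to the caller via ResultWithJobs.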
    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)