Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / instance.py @ 4b92e992

History | View | Annotate | Download (134.7 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with instances."""
23

    
24
import OpenSSL
25
import copy
26
import logging
27
import os
28

    
29
from ganeti import compat
30
from ganeti import constants
31
from ganeti import errors
32
from ganeti import ht
33
from ganeti import hypervisor
34
from ganeti import locking
35
from ganeti.masterd import iallocator
36
from ganeti import masterd
37
from ganeti import netutils
38
from ganeti import objects
39
from ganeti import opcodes
40
from ganeti import pathutils
41
from ganeti import rpc
42
from ganeti import utils
43

    
44
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45

    
46
from ganeti.cmdlib.common import INSTANCE_DOWN, \
47
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
53
from ganeti.cmdlib.instance_storage import CreateDisks, \
54
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
56
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
58
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
59
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
60
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
61
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
62
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
63
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
64

    
65
import ganeti.masterd.instance
66

    
67

    
68
#: Type description for changes as returned by L{_ApplyContainerMods}'s
69
#: callbacks
70
_TApplyContModsCbChanges = \
71
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
72
    ht.TNonEmptyString,
73
    ht.TAny,
74
    ])))
75

    
76

    
77
def _CheckHostnameSane(lu, name):
78
  """Ensures that a given hostname resolves to a 'sane' name.
79

80
  The given name is required to be a prefix of the resolved hostname,
81
  to prevent accidental mismatches.
82

83
  @param lu: the logical unit on behalf of which we're checking
84
  @param name: the name we should resolve and check
85
  @return: the resolved hostname object
86

87
  """
88
  hostname = netutils.GetHostname(name=name)
89
  if hostname.name != name:
90
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
91
  if not utils.MatchNameComponent(name, [hostname.name]):
92
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
93
                                " same as given hostname '%s'") %
94
                               (hostname.name, name), errors.ECODE_INVAL)
95
  return hostname
96

    
97

    
98
def _CheckOpportunisticLocking(op):
99
  """Generate error if opportunistic locking is not possible.
100

101
  """
102
  if op.opportunistic_locking and not op.iallocator:
103
    raise errors.OpPrereqError("Opportunistic locking is only available in"
104
                               " combination with an instance allocator",
105
                               errors.ECODE_INVAL)
106

    
107

    
108
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
109
  """Wrapper around IAReqInstanceAlloc.
110

111
  @param op: The instance opcode
112
  @param disks: The computed disks
113
  @param nics: The computed nics
114
  @param beparams: The full filled beparams
115
  @param node_whitelist: List of nodes which should appear as online to the
116
    allocator (unless the node is already marked offline)
117

118
  @returns: A filled L{iallocator.IAReqInstanceAlloc}
119

120
  """
121
  spindle_use = beparams[constants.BE_SPINDLE_USE]
122
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
123
                                       disk_template=op.disk_template,
124
                                       tags=op.tags,
125
                                       os=op.os_type,
126
                                       vcpus=beparams[constants.BE_VCPUS],
127
                                       memory=beparams[constants.BE_MAXMEM],
128
                                       spindle_use=spindle_use,
129
                                       disks=disks,
130
                                       nics=[n.ToDict() for n in nics],
131
                                       hypervisor=op.hypervisor,
132
                                       node_whitelist=node_whitelist)
133

    
134

    
135
def _ComputeFullBeParams(op, cluster):
136
  """Computes the full beparams.
137

138
  @param op: The instance opcode
139
  @param cluster: The cluster config object
140

141
  @return: The fully filled beparams
142

143
  """
144
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
145
  for param, value in op.beparams.iteritems():
146
    if value == constants.VALUE_AUTO:
147
      op.beparams[param] = default_beparams[param]
148
  objects.UpgradeBeParams(op.beparams)
149
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
150
  return cluster.SimpleFillBE(op.beparams)
151

    
152

    
153
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
154
  """Computes the nics.
155

156
  @param op: The instance opcode
157
  @param cluster: Cluster configuration object
158
  @param default_ip: The default ip to assign
159
  @param cfg: An instance of the configuration object
160
  @param ec_id: Execution context ID
161

162
  @returns: The build up nics
163

164
  """
165
  nics = []
166
  for nic in op.nics:
167
    nic_mode_req = nic.get(constants.INIC_MODE, None)
168
    nic_mode = nic_mode_req
169
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
170
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
171

    
172
    net = nic.get(constants.INIC_NETWORK, None)
173
    link = nic.get(constants.NIC_LINK, None)
174
    ip = nic.get(constants.INIC_IP, None)
175

    
176
    if net is None or net.lower() == constants.VALUE_NONE:
177
      net = None
178
    else:
179
      if nic_mode_req is not None or link is not None:
180
        raise errors.OpPrereqError("If network is given, no mode or link"
181
                                   " is allowed to be passed",
182
                                   errors.ECODE_INVAL)
183

    
184
    # ip validity checks
185
    if ip is None or ip.lower() == constants.VALUE_NONE:
186
      nic_ip = None
187
    elif ip.lower() == constants.VALUE_AUTO:
188
      if not op.name_check:
189
        raise errors.OpPrereqError("IP address set to auto but name checks"
190
                                   " have been skipped",
191
                                   errors.ECODE_INVAL)
192
      nic_ip = default_ip
193
    else:
194
      # We defer pool operations until later, so that the iallocator has
195
      # filled in the instance's node(s) dimara
196
      if ip.lower() == constants.NIC_IP_POOL:
197
        if net is None:
198
          raise errors.OpPrereqError("if ip=pool, parameter network"
199
                                     " must be passed too",
200
                                     errors.ECODE_INVAL)
201

    
202
      elif not netutils.IPAddress.IsValid(ip):
203
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
204
                                   errors.ECODE_INVAL)
205

    
206
      nic_ip = ip
207

    
208
    # TODO: check the ip address for uniqueness
209
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
210
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
211
                                 errors.ECODE_INVAL)
212

    
213
    # MAC address verification
214
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
215
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
216
      mac = utils.NormalizeAndValidateMac(mac)
217

    
218
      try:
219
        # TODO: We need to factor this out
220
        cfg.ReserveMAC(mac, ec_id)
221
      except errors.ReservationError:
222
        raise errors.OpPrereqError("MAC address %s already in use"
223
                                   " in cluster" % mac,
224
                                   errors.ECODE_NOTUNIQUE)
225

    
226
    #  Build nic parameters
227
    nicparams = {}
228
    if nic_mode_req:
229
      nicparams[constants.NIC_MODE] = nic_mode
230
    if link:
231
      nicparams[constants.NIC_LINK] = link
232

    
233
    check_params = cluster.SimpleFillNIC(nicparams)
234
    objects.NIC.CheckParameterSyntax(check_params)
235
    net_uuid = cfg.LookupNetwork(net)
236
    name = nic.get(constants.INIC_NAME, None)
237
    if name is not None and name.lower() == constants.VALUE_NONE:
238
      name = None
239
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
240
                          network=net_uuid, nicparams=nicparams)
241
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
242
    nics.append(nic_obj)
243

    
244
  return nics
245

    
246

    
247
def _CheckForConflictingIp(lu, ip, node):
248
  """In case of conflicting IP address raise error.
249

250
  @type ip: string
251
  @param ip: IP address
252
  @type node: string
253
  @param node: node name
254

255
  """
256
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
257
  if conf_net is not None:
258
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
259
                                " network %s, but the target NIC does not." %
260
                                (ip, conf_net)),
261
                               errors.ECODE_STATE)
262

    
263
  return (None, None)
264

    
265

    
266
def _ComputeIPolicyInstanceSpecViolation(
267
  ipolicy, instance_spec, disk_template,
268
  _compute_fn=ComputeIPolicySpecViolation):
269
  """Compute if instance specs meets the specs of ipolicy.
270

271
  @type ipolicy: dict
272
  @param ipolicy: The ipolicy to verify against
273
  @param instance_spec: dict
274
  @param instance_spec: The instance spec to verify
275
  @type disk_template: string
276
  @param disk_template: the disk template of the instance
277
  @param _compute_fn: The function to verify ipolicy (unittest only)
278
  @see: L{ComputeIPolicySpecViolation}
279

280
  """
281
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
282
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
283
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
284
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
285
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
286
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
287

    
288
  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
289
                     disk_sizes, spindle_use, disk_template)
290

    
291

    
292
def _CheckOSVariant(os_obj, name):
293
  """Check whether an OS name conforms to the os variants specification.
294

295
  @type os_obj: L{objects.OS}
296
  @param os_obj: OS object to check
297
  @type name: string
298
  @param name: OS name passed by the user, to check for validity
299

300
  """
301
  variant = objects.OS.GetVariant(name)
302
  if not os_obj.supported_variants:
303
    if variant:
304
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
305
                                 " passed)" % (os_obj.name, variant),
306
                                 errors.ECODE_INVAL)
307
    return
308
  if not variant:
309
    raise errors.OpPrereqError("OS name must include a variant",
310
                               errors.ECODE_INVAL)
311

    
312
  if variant not in os_obj.supported_variants:
313
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
314

    
315

    
316
class LUInstanceCreate(LogicalUnit):
317
  """Create an instance.
318

319
  """
320
  HPATH = "instance-add"
321
  HTYPE = constants.HTYPE_INSTANCE
322
  REQ_BGL = False
323

    
324
  def CheckArguments(self):
325
    """Check arguments.
326

327
    """
328
    # do not require name_check to ease forward/backward compatibility
329
    # for tools
330
    if self.op.no_install and self.op.start:
331
      self.LogInfo("No-installation mode selected, disabling startup")
332
      self.op.start = False
333
    # validate/normalize the instance name
334
    self.op.instance_name = \
335
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
336

    
337
    if self.op.ip_check and not self.op.name_check:
338
      # TODO: make the ip check more flexible and not depend on the name check
339
      raise errors.OpPrereqError("Cannot do IP address check without a name"
340
                                 " check", errors.ECODE_INVAL)
341

    
342
    # check nics' parameter names
343
    for nic in self.op.nics:
344
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
345
    # check that NIC's parameters names are unique and valid
346
    utils.ValidateDeviceNames("NIC", self.op.nics)
347

    
348
    # check that disk's names are unique and valid
349
    utils.ValidateDeviceNames("disk", self.op.disks)
350

    
351
    cluster = self.cfg.GetClusterInfo()
352
    if not self.op.disk_template in cluster.enabled_disk_templates:
353
      raise errors.OpPrereqError("Cannot create an instance with disk template"
354
                                 " '%s', because it is not enabled in the"
355
                                 " cluster. Enabled disk templates are: %s." %
356
                                 (self.op.disk_template,
357
                                  ",".join(cluster.enabled_disk_templates)))
358

    
359
    # check disks. parameter names and consistent adopt/no-adopt strategy
360
    has_adopt = has_no_adopt = False
361
    for disk in self.op.disks:
362
      if self.op.disk_template != constants.DT_EXT:
363
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
364
      if constants.IDISK_ADOPT in disk:
365
        has_adopt = True
366
      else:
367
        has_no_adopt = True
368
    if has_adopt and has_no_adopt:
369
      raise errors.OpPrereqError("Either all disks are adopted or none is",
370
                                 errors.ECODE_INVAL)
371
    if has_adopt:
372
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
373
        raise errors.OpPrereqError("Disk adoption is not supported for the"
374
                                   " '%s' disk template" %
375
                                   self.op.disk_template,
376
                                   errors.ECODE_INVAL)
377
      if self.op.iallocator is not None:
378
        raise errors.OpPrereqError("Disk adoption not allowed with an"
379
                                   " iallocator script", errors.ECODE_INVAL)
380
      if self.op.mode == constants.INSTANCE_IMPORT:
381
        raise errors.OpPrereqError("Disk adoption not allowed for"
382
                                   " instance import", errors.ECODE_INVAL)
383
    else:
384
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
385
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
386
                                   " but no 'adopt' parameter given" %
387
                                   self.op.disk_template,
388
                                   errors.ECODE_INVAL)
389

    
390
    self.adopt_disks = has_adopt
391

    
392
    # instance name verification
393
    if self.op.name_check:
394
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
395
      self.op.instance_name = self.hostname1.name
396
      # used in CheckPrereq for ip ping check
397
      self.check_ip = self.hostname1.ip
398
    else:
399
      self.check_ip = None
400

    
401
    # file storage checks
402
    if (self.op.file_driver and
403
        not self.op.file_driver in constants.FILE_DRIVER):
404
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
405
                                 self.op.file_driver, errors.ECODE_INVAL)
406

    
407
    if self.op.disk_template == constants.DT_FILE:
408
      opcodes.RequireFileStorage()
409
    elif self.op.disk_template == constants.DT_SHARED_FILE:
410
      opcodes.RequireSharedFileStorage()
411

    
412
    ### Node/iallocator related checks
413
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
414

    
415
    if self.op.pnode is not None:
416
      if self.op.disk_template in constants.DTS_INT_MIRROR:
417
        if self.op.snode is None:
418
          raise errors.OpPrereqError("The networked disk templates need"
419
                                     " a mirror node", errors.ECODE_INVAL)
420
      elif self.op.snode:
421
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
422
                        " template")
423
        self.op.snode = None
424

    
425
    _CheckOpportunisticLocking(self.op)
426

    
427
    self._cds = GetClusterDomainSecret()
428

    
429
    if self.op.mode == constants.INSTANCE_IMPORT:
430
      # On import force_variant must be True, because if we forced it at
431
      # initial install, our only chance when importing it back is that it
432
      # works again!
433
      self.op.force_variant = True
434

    
435
      if self.op.no_install:
436
        self.LogInfo("No-installation mode has no effect during import")
437

    
438
    elif self.op.mode == constants.INSTANCE_CREATE:
439
      if self.op.os_type is None:
440
        raise errors.OpPrereqError("No guest OS specified",
441
                                   errors.ECODE_INVAL)
442
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
443
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
444
                                   " installation" % self.op.os_type,
445
                                   errors.ECODE_STATE)
446
      if self.op.disk_template is None:
447
        raise errors.OpPrereqError("No disk template specified",
448
                                   errors.ECODE_INVAL)
449

    
450
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
451
      # Check handshake to ensure both clusters have the same domain secret
452
      src_handshake = self.op.source_handshake
453
      if not src_handshake:
454
        raise errors.OpPrereqError("Missing source handshake",
455
                                   errors.ECODE_INVAL)
456

    
457
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
458
                                                           src_handshake)
459
      if errmsg:
460
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
461
                                   errors.ECODE_INVAL)
462

    
463
      # Load and check source CA
464
      self.source_x509_ca_pem = self.op.source_x509_ca
465
      if not self.source_x509_ca_pem:
466
        raise errors.OpPrereqError("Missing source X509 CA",
467
                                   errors.ECODE_INVAL)
468

    
469
      try:
470
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
471
                                                    self._cds)
472
      except OpenSSL.crypto.Error, err:
473
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
474
                                   (err, ), errors.ECODE_INVAL)
475

    
476
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
477
      if errcode is not None:
478
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
479
                                   errors.ECODE_INVAL)
480

    
481
      self.source_x509_ca = cert
482

    
483
      src_instance_name = self.op.source_instance_name
484
      if not src_instance_name:
485
        raise errors.OpPrereqError("Missing source instance name",
486
                                   errors.ECODE_INVAL)
487

    
488
      self.source_instance_name = \
489
        netutils.GetHostname(name=src_instance_name).name
490

    
491
    else:
492
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
493
                                 self.op.mode, errors.ECODE_INVAL)
494

    
495
  def ExpandNames(self):
496
    """ExpandNames for CreateInstance.
497

498
    Figure out the right locks for instance creation.
499

500
    """
501
    self.needed_locks = {}
502

    
503
    instance_name = self.op.instance_name
504
    # this is just a preventive check, but someone might still add this
505
    # instance in the meantime, and creation will fail at lock-add time
506
    if instance_name in self.cfg.GetInstanceList():
507
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
508
                                 instance_name, errors.ECODE_EXISTS)
509

    
510
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
511

    
512
    if self.op.iallocator:
513
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
514
      # specifying a group on instance creation and then selecting nodes from
515
      # that group
516
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
517
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
518

    
519
      if self.op.opportunistic_locking:
520
        self.opportunistic_locks[locking.LEVEL_NODE] = True
521
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
522
    else:
523
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
524
      nodelist = [self.op.pnode]
525
      if self.op.snode is not None:
526
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
527
        nodelist.append(self.op.snode)
528
      self.needed_locks[locking.LEVEL_NODE] = nodelist
529

    
530
    # in case of import lock the source node too
531
    if self.op.mode == constants.INSTANCE_IMPORT:
532
      src_node = self.op.src_node
533
      src_path = self.op.src_path
534

    
535
      if src_path is None:
536
        self.op.src_path = src_path = self.op.instance_name
537

    
538
      if src_node is None:
539
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
540
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
541
        self.op.src_node = None
542
        if os.path.isabs(src_path):
543
          raise errors.OpPrereqError("Importing an instance from a path"
544
                                     " requires a source node option",
545
                                     errors.ECODE_INVAL)
546
      else:
547
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
548
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
549
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
550
        if not os.path.isabs(src_path):
551
          self.op.src_path = src_path = \
552
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
553

    
554
    self.needed_locks[locking.LEVEL_NODE_RES] = \
555
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
556

    
557
  def _RunAllocator(self):
558
    """Run the allocator based on input opcode.
559

560
    """
561
    if self.op.opportunistic_locking:
562
      # Only consider nodes for which a lock is held
563
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
564
    else:
565
      node_whitelist = None
566

    
567
    #TODO Export network to iallocator so that it chooses a pnode
568
    #     in a nodegroup that has the desired network connected to
569
    req = _CreateInstanceAllocRequest(self.op, self.disks,
570
                                      self.nics, self.be_full,
571
                                      node_whitelist)
572
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
573

    
574
    ial.Run(self.op.iallocator)
575

    
576
    if not ial.success:
577
      # When opportunistic locks are used only a temporary failure is generated
578
      if self.op.opportunistic_locking:
579
        ecode = errors.ECODE_TEMP_NORES
580
      else:
581
        ecode = errors.ECODE_NORES
582

    
583
      raise errors.OpPrereqError("Can't compute nodes using"
584
                                 " iallocator '%s': %s" %
585
                                 (self.op.iallocator, ial.info),
586
                                 ecode)
587

    
588
    self.op.pnode = ial.result[0]
589
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
590
                 self.op.instance_name, self.op.iallocator,
591
                 utils.CommaJoin(ial.result))
592

    
593
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
594

    
595
    if req.RequiredNodes() == 2:
596
      self.op.snode = ial.result[1]
597

    
598
  def BuildHooksEnv(self):
599
    """Build hooks env.
600

601
    This runs on master, primary and secondary nodes of the instance.
602

603
    """
604
    env = {
605
      "ADD_MODE": self.op.mode,
606
      }
607
    if self.op.mode == constants.INSTANCE_IMPORT:
608
      env["SRC_NODE"] = self.op.src_node
609
      env["SRC_PATH"] = self.op.src_path
610
      env["SRC_IMAGES"] = self.src_images
611

    
612
    env.update(BuildInstanceHookEnv(
613
      name=self.op.instance_name,
614
      primary_node=self.op.pnode,
615
      secondary_nodes=self.secondaries,
616
      status=self.op.start,
617
      os_type=self.op.os_type,
618
      minmem=self.be_full[constants.BE_MINMEM],
619
      maxmem=self.be_full[constants.BE_MAXMEM],
620
      vcpus=self.be_full[constants.BE_VCPUS],
621
      nics=NICListToTuple(self, self.nics),
622
      disk_template=self.op.disk_template,
623
      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
624
              d[constants.IDISK_MODE]) for d in self.disks],
625
      bep=self.be_full,
626
      hvp=self.hv_full,
627
      hypervisor_name=self.op.hypervisor,
628
      tags=self.op.tags,
629
      ))
630

    
631
    return env
632

    
633
  def BuildHooksNodes(self):
634
    """Build hooks nodes.
635

636
    """
637
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
638
    return nl, nl
639

    
640
  def _ReadExportInfo(self):
641
    """Reads the export information from disk.
642

643
    It will override the opcode source node and path with the actual
644
    information, if these two were not specified before.
645

646
    @return: the export information
647

648
    """
649
    assert self.op.mode == constants.INSTANCE_IMPORT
650

    
651
    src_node = self.op.src_node
652
    src_path = self.op.src_path
653

    
654
    if src_node is None:
655
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
656
      exp_list = self.rpc.call_export_list(locked_nodes)
657
      found = False
658
      for node in exp_list:
659
        if exp_list[node].fail_msg:
660
          continue
661
        if src_path in exp_list[node].payload:
662
          found = True
663
          self.op.src_node = src_node = node
664
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
665
                                                       src_path)
666
          break
667
      if not found:
668
        raise errors.OpPrereqError("No export found for relative path %s" %
669
                                   src_path, errors.ECODE_INVAL)
670

    
671
    CheckNodeOnline(self, src_node)
672
    result = self.rpc.call_export_info(src_node, src_path)
673
    result.Raise("No export or invalid export found in dir %s" % src_path)
674

    
675
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
676
    if not export_info.has_section(constants.INISECT_EXP):
677
      raise errors.ProgrammerError("Corrupted export config",
678
                                   errors.ECODE_ENVIRON)
679

    
680
    ei_version = export_info.get(constants.INISECT_EXP, "version")
681
    if (int(ei_version) != constants.EXPORT_VERSION):
682
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
683
                                 (ei_version, constants.EXPORT_VERSION),
684
                                 errors.ECODE_ENVIRON)
685
    return export_info
686

    
687
  def _ReadExportParams(self, einfo):
688
    """Use export parameters as defaults.
689

690
    In case the opcode doesn't specify (as in override) some instance
691
    parameters, then try to use them from the export information, if
692
    that declares them.
693

694
    """
695
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
696

    
697
    if self.op.disk_template is None:
698
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
699
        self.op.disk_template = einfo.get(constants.INISECT_INS,
700
                                          "disk_template")
701
        if self.op.disk_template not in constants.DISK_TEMPLATES:
702
          raise errors.OpPrereqError("Disk template specified in configuration"
703
                                     " file is not one of the allowed values:"
704
                                     " %s" %
705
                                     " ".join(constants.DISK_TEMPLATES),
706
                                     errors.ECODE_INVAL)
707
      else:
708
        raise errors.OpPrereqError("No disk template specified and the export"
709
                                   " is missing the disk_template information",
710
                                   errors.ECODE_INVAL)
711

    
712
    if not self.op.disks:
713
      disks = []
714
      # TODO: import the disk iv_name too
715
      for idx in range(constants.MAX_DISKS):
716
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
717
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
718
          disks.append({constants.IDISK_SIZE: disk_sz})
719
      self.op.disks = disks
720
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
721
        raise errors.OpPrereqError("No disk info specified and the export"
722
                                   " is missing the disk information",
723
                                   errors.ECODE_INVAL)
724

    
725
    if not self.op.nics:
726
      nics = []
727
      for idx in range(constants.MAX_NICS):
728
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
729
          ndict = {}
730
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
731
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
732
            ndict[name] = v
733
          nics.append(ndict)
734
        else:
735
          break
736
      self.op.nics = nics
737

    
738
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
739
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
740

    
741
    if (self.op.hypervisor is None and
742
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
743
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
744

    
745
    if einfo.has_section(constants.INISECT_HYP):
746
      # use the export parameters but do not override the ones
747
      # specified by the user
748
      for name, value in einfo.items(constants.INISECT_HYP):
749
        if name not in self.op.hvparams:
750
          self.op.hvparams[name] = value
751

    
752
    if einfo.has_section(constants.INISECT_BEP):
753
      # use the parameters, without overriding
754
      for name, value in einfo.items(constants.INISECT_BEP):
755
        if name not in self.op.beparams:
756
          self.op.beparams[name] = value
757
        # Compatibility for the old "memory" be param
758
        if name == constants.BE_MEMORY:
759
          if constants.BE_MAXMEM not in self.op.beparams:
760
            self.op.beparams[constants.BE_MAXMEM] = value
761
          if constants.BE_MINMEM not in self.op.beparams:
762
            self.op.beparams[constants.BE_MINMEM] = value
763
    else:
764
      # try to read the parameters old style, from the main section
765
      for name in constants.BES_PARAMETERS:
766
        if (name not in self.op.beparams and
767
            einfo.has_option(constants.INISECT_INS, name)):
768
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
769

    
770
    if einfo.has_section(constants.INISECT_OSP):
771
      # use the parameters, without overriding
772
      for name, value in einfo.items(constants.INISECT_OSP):
773
        if name not in self.op.osparams:
774
          self.op.osparams[name] = value
775

    
776
  def _RevertToDefaults(self, cluster):
777
    """Revert the instance parameters to the default values.
778

779
    """
780
    # hvparams
781
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
782
    for name in self.op.hvparams.keys():
783
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
784
        del self.op.hvparams[name]
785
    # beparams
786
    be_defs = cluster.SimpleFillBE({})
787
    for name in self.op.beparams.keys():
788
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
789
        del self.op.beparams[name]
790
    # nic params
791
    nic_defs = cluster.SimpleFillNIC({})
792
    for nic in self.op.nics:
793
      for name in constants.NICS_PARAMETERS:
794
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
795
          del nic[name]
796
    # osparams
797
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
798
    for name in self.op.osparams.keys():
799
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
800
        del self.op.osparams[name]
801

    
802
  def _CalculateFileStorageDir(self):
803
    """Calculate final instance file storage dir.
804

805
    """
806
    # file storage dir calculation/check
807
    self.instance_file_storage_dir = None
808
    if self.op.disk_template in constants.DTS_FILEBASED:
809
      # build the full file storage dir path
810
      joinargs = []
811

    
812
      if self.op.disk_template == constants.DT_SHARED_FILE:
813
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
814
      else:
815
        get_fsd_fn = self.cfg.GetFileStorageDir
816

    
817
      cfg_storagedir = get_fsd_fn()
818
      if not cfg_storagedir:
819
        raise errors.OpPrereqError("Cluster file storage dir not defined",
820
                                   errors.ECODE_STATE)
821
      joinargs.append(cfg_storagedir)
822

    
823
      if self.op.file_storage_dir is not None:
824
        joinargs.append(self.op.file_storage_dir)
825

    
826
      joinargs.append(self.op.instance_name)
827

    
828
      # pylint: disable=W0142
829
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
830

    
831
  def CheckPrereq(self): # pylint: disable=R0914
832
    """Check prerequisites.
833

834
    """
835
    self._CalculateFileStorageDir()
836

    
837
    if self.op.mode == constants.INSTANCE_IMPORT:
838
      export_info = self._ReadExportInfo()
839
      self._ReadExportParams(export_info)
840
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
841
    else:
842
      self._old_instance_name = None
843

    
844
    if (not self.cfg.GetVGName() and
845
        self.op.disk_template not in constants.DTS_NOT_LVM):
846
      raise errors.OpPrereqError("Cluster does not support lvm-based"
847
                                 " instances", errors.ECODE_STATE)
848

    
849
    if (self.op.hypervisor is None or
850
        self.op.hypervisor == constants.VALUE_AUTO):
851
      self.op.hypervisor = self.cfg.GetHypervisorType()
852

    
853
    cluster = self.cfg.GetClusterInfo()
854
    enabled_hvs = cluster.enabled_hypervisors
855
    if self.op.hypervisor not in enabled_hvs:
856
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
857
                                 " cluster (%s)" %
858
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
859
                                 errors.ECODE_STATE)
860

    
861
    # Check tag validity
862
    for tag in self.op.tags:
863
      objects.TaggableObject.ValidateTag(tag)
864

    
865
    # check hypervisor parameter syntax (locally)
866
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
867
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
868
                                      self.op.hvparams)
869
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
870
    hv_type.CheckParameterSyntax(filled_hvp)
871
    self.hv_full = filled_hvp
872
    # check that we don't specify global parameters on an instance
873
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
874
                         "instance", "cluster")
875

    
876
    # fill and remember the beparams dict
877
    self.be_full = _ComputeFullBeParams(self.op, cluster)
878

    
879
    # build os parameters
880
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
881

    
882
    # now that hvp/bep are in final format, let's reset to defaults,
883
    # if told to do so
884
    if self.op.identify_defaults:
885
      self._RevertToDefaults(cluster)
886

    
887
    # NIC buildup
888
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
889
                             self.proc.GetECId())
890

    
891
    # disk checks/pre-build
892
    default_vg = self.cfg.GetVGName()
893
    self.disks = ComputeDisks(self.op, default_vg)
894

    
895
    if self.op.mode == constants.INSTANCE_IMPORT:
896
      disk_images = []
897
      for idx in range(len(self.disks)):
898
        option = "disk%d_dump" % idx
899
        if export_info.has_option(constants.INISECT_INS, option):
900
          # FIXME: are the old os-es, disk sizes, etc. useful?
901
          export_name = export_info.get(constants.INISECT_INS, option)
902
          image = utils.PathJoin(self.op.src_path, export_name)
903
          disk_images.append(image)
904
        else:
905
          disk_images.append(False)
906

    
907
      self.src_images = disk_images
908

    
909
      if self.op.instance_name == self._old_instance_name:
910
        for idx, nic in enumerate(self.nics):
911
          if nic.mac == constants.VALUE_AUTO:
912
            nic_mac_ini = "nic%d_mac" % idx
913
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
914

    
915
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
916

    
917
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
918
    if self.op.ip_check:
919
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
920
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
921
                                   (self.check_ip, self.op.instance_name),
922
                                   errors.ECODE_NOTUNIQUE)
923

    
924
    #### mac address generation
925
    # By generating here the mac address both the allocator and the hooks get
926
    # the real final mac address rather than the 'auto' or 'generate' value.
927
    # There is a race condition between the generation and the instance object
928
    # creation, which means that we know the mac is valid now, but we're not
929
    # sure it will be when we actually add the instance. If things go bad
930
    # adding the instance will abort because of a duplicate mac, and the
931
    # creation job will fail.
932
    for nic in self.nics:
933
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
934
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
935

    
936
    #### allocator run
937

    
938
    if self.op.iallocator is not None:
939
      self._RunAllocator()
940

    
941
    # Release all unneeded node locks
942
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
943
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
944
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
945
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
946

    
947
    assert (self.owned_locks(locking.LEVEL_NODE) ==
948
            self.owned_locks(locking.LEVEL_NODE_RES)), \
949
      "Node locks differ from node resource locks"
950

    
951
    #### node related checks
952

    
953
    # check primary node
954
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
955
    assert self.pnode is not None, \
956
      "Cannot retrieve locked node %s" % self.op.pnode
957
    if pnode.offline:
958
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
959
                                 pnode.name, errors.ECODE_STATE)
960
    if pnode.drained:
961
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
962
                                 pnode.name, errors.ECODE_STATE)
963
    if not pnode.vm_capable:
964
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
965
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
966

    
967
    self.secondaries = []
968

    
969
    # Fill in any IPs from IP pools. This must happen here, because we need to
970
    # know the nic's primary node, as specified by the iallocator
971
    for idx, nic in enumerate(self.nics):
972
      net_uuid = nic.network
973
      if net_uuid is not None:
974
        nobj = self.cfg.GetNetwork(net_uuid)
975
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
976
        if netparams is None:
977
          raise errors.OpPrereqError("No netparams found for network"
978
                                     " %s. Propably not connected to"
979
                                     " node's %s nodegroup" %
980
                                     (nobj.name, self.pnode.name),
981
                                     errors.ECODE_INVAL)
982
        self.LogInfo("NIC/%d inherits netparams %s" %
983
                     (idx, netparams.values()))
984
        nic.nicparams = dict(netparams)
985
        if nic.ip is not None:
986
          if nic.ip.lower() == constants.NIC_IP_POOL:
987
            try:
988
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
989
            except errors.ReservationError:
990
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
991
                                         " from the address pool" % idx,
992
                                         errors.ECODE_STATE)
993
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
994
          else:
995
            try:
996
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
997
            except errors.ReservationError:
998
              raise errors.OpPrereqError("IP address %s already in use"
999
                                         " or does not belong to network %s" %
1000
                                         (nic.ip, nobj.name),
1001
                                         errors.ECODE_NOTUNIQUE)
1002

    
1003
      # net is None, ip None or given
1004
      elif self.op.conflicts_check:
1005
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1006

    
1007
    # mirror node verification
1008
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1009
      if self.op.snode == pnode.name:
1010
        raise errors.OpPrereqError("The secondary node cannot be the"
1011
                                   " primary node", errors.ECODE_INVAL)
1012
      CheckNodeOnline(self, self.op.snode)
1013
      CheckNodeNotDrained(self, self.op.snode)
1014
      CheckNodeVmCapable(self, self.op.snode)
1015
      self.secondaries.append(self.op.snode)
1016

    
1017
      snode = self.cfg.GetNodeInfo(self.op.snode)
1018
      if pnode.group != snode.group:
1019
        self.LogWarning("The primary and secondary nodes are in two"
1020
                        " different node groups; the disk parameters"
1021
                        " from the first disk's node group will be"
1022
                        " used")
1023

    
1024
    nodes = [pnode]
1025
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1026
      nodes.append(snode)
1027
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1028
    if compat.any(map(has_es, nodes)):
1029
      if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1030
        raise errors.OpPrereqError("Disk template %s not supported with"
1031
                                   " exclusive storage" % self.op.disk_template,
1032
                                   errors.ECODE_STATE)
1033

    
1034
    nodenames = [pnode.name] + self.secondaries
1035

    
1036
    if not self.adopt_disks:
1037
      if self.op.disk_template == constants.DT_RBD:
1038
        # _CheckRADOSFreeSpace() is just a placeholder.
1039
        # Any function that checks prerequisites can be placed here.
1040
        # Check if there is enough space on the RADOS cluster.
1041
        CheckRADOSFreeSpace()
1042
      elif self.op.disk_template == constants.DT_EXT:
1043
        # FIXME: Function that checks prereqs if needed
1044
        pass
1045
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
1046
        # Check lv size requirements, if not adopting
1047
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1048
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1049
      else:
1050
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1051
        pass
1052

    
1053
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1054
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1055
                                disk[constants.IDISK_ADOPT])
1056
                     for disk in self.disks])
1057
      if len(all_lvs) != len(self.disks):
1058
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1059
                                   errors.ECODE_INVAL)
1060
      for lv_name in all_lvs:
1061
        try:
1062
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1063
          # to ReserveLV uses the same syntax
1064
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1065
        except errors.ReservationError:
1066
          raise errors.OpPrereqError("LV named %s used by another instance" %
1067
                                     lv_name, errors.ECODE_NOTUNIQUE)
1068

    
1069
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1070
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1071

    
1072
      node_lvs = self.rpc.call_lv_list([pnode.name],
1073
                                       vg_names.payload.keys())[pnode.name]
1074
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1075
      node_lvs = node_lvs.payload
1076

    
1077
      delta = all_lvs.difference(node_lvs.keys())
1078
      if delta:
1079
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1080
                                   utils.CommaJoin(delta),
1081
                                   errors.ECODE_INVAL)
1082
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1083
      if online_lvs:
1084
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1085
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1086
                                   errors.ECODE_STATE)
1087
      # update the size of disk based on what is found
1088
      for dsk in self.disks:
1089
        dsk[constants.IDISK_SIZE] = \
1090
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1091
                                        dsk[constants.IDISK_ADOPT])][0]))
1092

    
1093
    elif self.op.disk_template == constants.DT_BLOCK:
1094
      # Normalize and de-duplicate device paths
1095
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1096
                       for disk in self.disks])
1097
      if len(all_disks) != len(self.disks):
1098
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1099
                                   errors.ECODE_INVAL)
1100
      baddisks = [d for d in all_disks
1101
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1102
      if baddisks:
1103
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1104
                                   " cannot be adopted" %
1105
                                   (utils.CommaJoin(baddisks),
1106
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1107
                                   errors.ECODE_INVAL)
1108

    
1109
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
1110
                                            list(all_disks))[pnode.name]
1111
      node_disks.Raise("Cannot get block device information from node %s" %
1112
                       pnode.name)
1113
      node_disks = node_disks.payload
1114
      delta = all_disks.difference(node_disks.keys())
1115
      if delta:
1116
        raise errors.OpPrereqError("Missing block device(s): %s" %
1117
                                   utils.CommaJoin(delta),
1118
                                   errors.ECODE_INVAL)
1119
      for dsk in self.disks:
1120
        dsk[constants.IDISK_SIZE] = \
1121
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1122

    
1123
    # Verify instance specs
1124
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1125
    ispec = {
1126
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1127
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1128
      constants.ISPEC_DISK_COUNT: len(self.disks),
1129
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1130
                                  for disk in self.disks],
1131
      constants.ISPEC_NIC_COUNT: len(self.nics),
1132
      constants.ISPEC_SPINDLE_USE: spindle_use,
1133
      }
1134

    
1135
    group_info = self.cfg.GetNodeGroup(pnode.group)
1136
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1137
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1138
                                               self.op.disk_template)
1139
    if not self.op.ignore_ipolicy and res:
1140
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1141
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1142
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1143

    
1144
    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1145

    
1146
    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1147
    # check OS parameters (remotely)
1148
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1149

    
1150
    CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1151

    
1152
    #TODO: _CheckExtParams (remotely)
1153
    # Check parameters for extstorage
1154

    
1155
    # memory check on primary node
1156
    #TODO(dynmem): use MINMEM for checking
1157
    if self.op.start:
1158
      CheckNodeFreeMemory(self, self.pnode.name,
1159
                          "creating instance %s" % self.op.instance_name,
1160
                          self.be_full[constants.BE_MAXMEM],
1161
                          self.op.hypervisor)
1162

    
1163
    self.dry_run_result = list(nodenames)
1164

    
1165
  def Exec(self, feedback_fn):
1166
    """Create and add the instance to the cluster.
1167

1168
    """
1169
    instance = self.op.instance_name
1170
    pnode_name = self.pnode.name
1171

    
1172
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1173
                self.owned_locks(locking.LEVEL_NODE)), \
1174
      "Node locks differ from node resource locks"
1175
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1176

    
1177
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-and-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, pnode_name)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             instance, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             instance, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (instance, pnode_name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node, pnode_name,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.name
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)
    # update info on disks
    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 instance.disk_template, errors.ECODE_STATE)

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node)
    CheckNodeNotDrained(self, target_node)
    CheckNodeVmCapable(self, target_node)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(self, target_node,
                          "failing over instance %s" %
                          instance.name, bep[constants.BE_MAXMEM],
                          instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.LogWarning("Could not shutdown instance %s on node %s."
                        " Proceeding anyway. Please make sure node"
                        " %s is down. Error details: %s",
                        instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = AssembleInstanceDisks(self, instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node,
                                            (instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator and has_nodes:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
        nodeslist.append(inst.pnode)
        if inst.snode is not None:
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
          nodeslist.append(inst.snode)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    cluster = self.cfg.GetClusterInfo()
    default_vg = self.cfg.GetVGName()
    ec_id = self.proc.GetECId()

    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                         _ComputeNics(op, cluster, None,
                                                      self.cfg, ec_id),
                                         _ComputeFullBeParams(op, cluster),
                                         node_whitelist)
             for op in self.op.instances]

    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    (allocatable, failed) = self.ia_result
    return {
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
        map(compat.fst, allocatable),
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
    (allocatable, failed) = self.ia_result

    jobs = []
    for (name, nodes) in allocatable:
      op = op2inst.pop(name)

      if len(nodes) > 1:
        (op.pnode, op.snode) = nodes
      else:
        (op.pnode,) = nodes

      jobs.append([op])

    missing = set(op2inst.keys()) - set(failed)
    assert not missing, \
      "Iallocator returned incomplete result: %s" % utils.CommaJoin(missing)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
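
# Illustrative sketch of the transformation above (hypothetical values): an
# input such as [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})] is
# returned as [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}, None)]
# when private_fn is None, or with a fresh private object (for example an
# _InstNicModPrivate instance) appended to each tuple otherwise.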


def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node, num_cpus, requested),
                                 errors.ECODE_NORES)
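
# Example call sketch (hypothetical node names): ensure that two candidate
# nodes each expose at least 4 physical CPUs to the KVM hypervisor:
#   _CheckNodesPhysicalCPUs(lu, ["node1.example.com", "node2.example.com"],
#                           4, constants.HT_KVM)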


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
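
# Usage sketch (hypothetical container): the identifier may be a numeric
# index, a name or a UUID, e.g.
#   GetItemFromContainer("0", "disk", instance.disks)   # first disk, by index
#   GetItemFromContainer("-1", "disk", instance.disks)  # last disk
#   GetItemFromContainer("data-disk", "disk", instance.disks)  # name or UUID
# Unknown identifiers raise errors.OpPrereqError with ECODE_NOENT.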


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only a positive integer or -1 is accepted"
                                   " as identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
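
# Semantics sketch (hypothetical values): applying
#   [(constants.DDM_REMOVE, "0", {}, None)]
# to a one-element NIC container deletes that NIC and, if a change list was
# passed as chgdesc, appends ("nic/0", "remove") to it; a DDM_ADD entry with
# identifier -1 appends the newly created item to the container instead.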


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )
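
# For example (hypothetical call): _UpdateIvNames(1, [disk_a, disk_b]) sets
# disk_a.iv_name to "disk/1" and disk_b.iv_name to "disk/2".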


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result
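
  # Upgrade sketch (hypothetical input): the legacy two-element format
  #   [(constants.DDM_ADD, params), ("2", params)]
  # is rewritten above to the three-element format
  #   [(constants.DDM_ADD, -1, params), (constants.DDM_MODIFY, "2", params)]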

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  @staticmethod
  def _VerifyDiskModification(op, params):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)

      try:
        size = int(size)
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
                                   errors.ECODE_INVAL)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)
      if len(params) > 2:
        raise errors.OpPrereqError("Disk modification doesn't support"
                                   " additional arbitrary parameters",
                                   errors.ECODE_INVAL)
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None
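
    # Example of a disk addition that passes the checks above (hypothetical
    # values): {constants.IDISK_SIZE: 1024, constants.IDISK_MODE:
    # constants.DISK_RDWR}; the size is normalized to an int and the mode
    # must be a member of constants.DISK_ACCESS_SET.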

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given, link or mode"
                                     " must not be specified",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)
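
    # Example of a NIC addition that passes the checks above (hypothetical
    # network name): attach to a network and draw an IP from its pool with
    # {constants.INIC_NETWORK: "prod-net",
    #  constants.INIC_IP: constants.NIC_IP_POOL}; the MAC address defaults
    # to constants.VALUE_AUTO when not given.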
2303

    
2304
  def CheckArguments(self):
2305
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2306
            self.op.hvparams or self.op.beparams or self.op.os_name or
2307
            self.op.offline is not None or self.op.runtime_mem or
2308
            self.op.pnode):
2309
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2310

    
2311
    if self.op.hvparams:
2312
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2313
                           "hypervisor", "instance", "cluster")
2314

    
2315
    self.op.disks = self._UpgradeDiskNicMods(
2316
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2317
    self.op.nics = self._UpgradeDiskNicMods(
2318
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2319

    
2320
    if self.op.disks and self.op.disk_template is not None:
2321
      raise errors.OpPrereqError("Disk template conversion and other disk"
2322
                                 " changes not supported at the same time",
2323
                                 errors.ECODE_INVAL)
2324

    
2325
    if (self.op.disk_template and
2326
        self.op.disk_template in constants.DTS_INT_MIRROR and
2327
        self.op.remote_node is None):
2328
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2329
                                 " one requires specifying a secondary node",
2330
                                 errors.ECODE_INVAL)
2331

    
2332
    # Check NIC modifications
2333
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2334
                    self._VerifyNicModification)
2335

    
2336
    if self.op.pnode:
2337
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2338

    
2339
  def ExpandNames(self):
2340
    self._ExpandAndLockInstance()
2341
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2342
    # Can't even acquire node locks in shared mode as upcoming changes in
2343
    # Ganeti 2.6 will start to modify the node object on disk conversion
2344
    self.needed_locks[locking.LEVEL_NODE] = []
2345
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2346
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2347
    # Look node group to look up the ipolicy
2348
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2349

    
2350
  def DeclareLocks(self, level):
2351
    if level == locking.LEVEL_NODEGROUP:
2352
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2353
      # Acquire locks for the instance's nodegroups optimistically. Needs
2354
      # to be verified in CheckPrereq
2355
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2356
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2357
    elif level == locking.LEVEL_NODE:
2358
      self._LockInstancesNodes()
2359
      if self.op.disk_template and self.op.remote_node:
2360
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2361
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2362
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2363
      # Copy node locks
2364
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2365
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2366

    
2367
  def BuildHooksEnv(self):
2368
    """Build hooks env.
2369

2370
    This runs on the master, primary and secondaries.
2371

2372
    """
2373
    args = {}
2374
    if constants.BE_MINMEM in self.be_new:
2375
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2376
    if constants.BE_MAXMEM in self.be_new:
2377
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2378
    if constants.BE_VCPUS in self.be_new:
2379
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2380
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2381
    # information at all.
2382

    
2383
    if self._new_nics is not None:
2384
      nics = []
2385

    
2386
      for nic in self._new_nics:
2387
        n = copy.deepcopy(nic)
2388
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2389
        n.nicparams = nicparams
2390
        nics.append(NICToTuple(self, n))
2391

    
2392
      args["nics"] = nics
2393

    
2394
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2395
    if self.op.disk_template:
2396
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2397
    if self.op.runtime_mem:
2398
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2399

    
2400
    return env
2401

    
2402
  def BuildHooksNodes(self):
2403
    """Build hooks nodes.
2404

2405
    """
2406
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2407
    return (nl, nl)
2408

    
2409
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2410
                              old_params, cluster, pnode):
2411

    
2412
    update_params_dict = dict([(key, params[key])
2413
                               for key in constants.NICS_PARAMETERS
2414
                               if key in params])
2415

    
2416
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2417
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2418

    
2419
    new_net_uuid = None
2420
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2421
    if new_net_uuid_or_name:
2422
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2423
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2424

    
2425
    if old_net_uuid:
2426
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2427

    
2428
    if new_net_uuid:
2429
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2430
      if not netparams:
2431
        raise errors.OpPrereqError("No netparams found for the network"
2432
                                   " %s, probably not connected" %
2433
                                   new_net_obj.name, errors.ECODE_INVAL)
2434
      new_params = dict(netparams)
2435
    else:
2436
      new_params = GetUpdatedParams(old_params, update_params_dict)
2437

    
2438
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2439

    
2440
    new_filled_params = cluster.SimpleFillNIC(new_params)
2441
    objects.NIC.CheckParameterSyntax(new_filled_params)
2442

    
2443
    new_mode = new_filled_params[constants.NIC_MODE]
2444
    if new_mode == constants.NIC_MODE_BRIDGED:
2445
      bridge = new_filled_params[constants.NIC_LINK]
2446
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2447
      if msg:
2448
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2449
        if self.op.force:
2450
          self.warn.append(msg)
2451
        else:
2452
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2453

    
2454
    elif new_mode == constants.NIC_MODE_ROUTED:
2455
      ip = params.get(constants.INIC_IP, old_ip)
2456
      if ip is None:
2457
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2458
                                   " on a routed NIC", errors.ECODE_INVAL)
2459

    
2460
    elif new_mode == constants.NIC_MODE_OVS:
2461
      # TODO: check OVS link
2462
      self.LogInfo("OVS links are currently not checked for correctness")
2463

    
2464
    if constants.INIC_MAC in params:
2465
      mac = params[constants.INIC_MAC]
2466
      if mac is None:
2467
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2468
                                   errors.ECODE_INVAL)
2469
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2470
        # otherwise generate the MAC address
2471
        params[constants.INIC_MAC] = \
2472
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2473
      else:
2474
        # or validate/reserve the current one
2475
        try:
2476
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2477
        except errors.ReservationError:
2478
          raise errors.OpPrereqError("MAC address '%s' already in use"
2479
                                     " in cluster" % mac,
2480
                                     errors.ECODE_NOTUNIQUE)
2481
    elif new_net_uuid != old_net_uuid:
2482

    
2483
      def get_net_prefix(net_uuid):
2484
        mac_prefix = None
2485
        if net_uuid:
2486
          nobj = self.cfg.GetNetwork(net_uuid)
2487
          mac_prefix = nobj.mac_prefix
2488

    
2489
        return mac_prefix
2490

    
2491
      new_prefix = get_net_prefix(new_net_uuid)
2492
      old_prefix = get_net_prefix(old_net_uuid)
2493
      if old_prefix != new_prefix:
2494
        params[constants.INIC_MAC] = \
2495
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2496

    
2497
    # if there is a change in (ip, network) tuple
2498
    new_ip = params.get(constants.INIC_IP, old_ip)
2499
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2500
      if new_ip:
2501
        # if IP is pool then require a network and generate one IP
2502
        if new_ip.lower() == constants.NIC_IP_POOL:
2503
          if new_net_uuid:
2504
            try:
2505
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2506
            except errors.ReservationError:
2507
              raise errors.OpPrereqError("Unable to get a free IP"
2508
                                         " from the address pool",
2509
                                         errors.ECODE_STATE)
2510
            self.LogInfo("Chose IP %s from network %s",
2511
                         new_ip,
2512
                         new_net_obj.name)
2513
            params[constants.INIC_IP] = new_ip
2514
          else:
2515
            raise errors.OpPrereqError("ip=pool, but no network found",
2516
                                       errors.ECODE_INVAL)
2517
        # Reserve new IP if in the new network if any
2518
        elif new_net_uuid:
2519
          try:
2520
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2521
            self.LogInfo("Reserving IP %s in network %s",
2522
                         new_ip, new_net_obj.name)
2523
          except errors.ReservationError:
2524
            raise errors.OpPrereqError("IP %s not available in network %s" %
2525
                                       (new_ip, new_net_obj.name),
2526
                                       errors.ECODE_NOTUNIQUE)
2527
        # new network is None so check if new IP is a conflicting IP
2528
        elif self.op.conflicts_check:
2529
          _CheckForConflictingIp(self, new_ip, pnode)
2530

    
2531
      # release old IP if old network is not None
2532
      if old_ip and old_net_uuid:
2533
        try:
2534
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2535
        except errors.AddressPoolError:
2536
          logging.warning("Release IP %s not contained in network %s",
2537
                          old_ip, old_net_obj.name)
2538

    
2539
    # there are no changes in (ip, network) tuple and old network is not None
2540
    elif (old_net_uuid is not None and
2541
          (req_link is not None or req_mode is not None)):
2542
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2543
                                 " a NIC that is connected to a network",
2544
                                 errors.ECODE_INVAL)
2545

    
2546
    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    instance = self.instance
    pnode = instance.primary_node
    cluster = self.cluster
    if instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 instance.disk_template, errors.ECODE_INVAL)

    if (instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
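    # A conversion to an internally mirrored template (currently only DRBD 8)
    # needs a new secondary node, which must be usable and have enough free
    # disk space for the instance's volumes.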
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node == pnode:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node)
      CheckNodeNotDrained(self, self.op.remote_node)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    instance = self.instance
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    if instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {},
                      self._VerifyDiskModification)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      self._VerifyDiskModification)

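    # Parse the requested disk modifications into (op, index, params, private)
    # tuples; they are applied to a copy below and for real in Exec().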
    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
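    # Record the disk count and sizes that will exist after the modification;
    # the instance policy checks at the end of CheckPrereq use them.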
    disk_sizes = [disk.size for disk in instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode = instance.primary_node

    self.warn = []

    if (self.op.pnode is not None and self.op.pnode != pnode and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
                                   errors.ECODE_STATE)

    assert pnode in self.owned_locks(locking.LEVEL_NODE)
    nodelist = list(instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
                                              instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
    be_old = cluster.FillBE(instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
                                max_requested_cpu + 1, instance.hypervisor)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

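    # When the maximum memory is increased without --force, verify that the
    # primary node (and the secondaries, if auto_balance is set) still has
    # enough free memory for the new value.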
    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         [instance.hypervisor], False)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" % pnode)
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" % node,
                     prereq=True, ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" % node,
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
2863
                                       " from failover to its secondary node"
2864
                                       " %s, due to not enough memory" % node,
2865
                                       errors.ECODE_STATE)
2866

    
2867
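    # Runtime memory (ballooning) can only be changed on a running instance;
    # unless --force is given, the new value must stay within the instance's
    # [minmem, maxmem] range.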
    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node)
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB unless --force is given" %
                                   (instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(self, instance.primary_node,
                            "ballooning memory for instance %s" %
                            instance.name, delta, instance.hypervisor)

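    # Helpers for _ApplyContainerMods below: they validate each requested NIC
    # addition or change and release the IP of NICs that are being removed.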
    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    assert instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in instance.disks]
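    # Generate a DRBD8 disk layout over (pnode, snode) that reuses the sizes,
    # modes and names of the existing plain LVs.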
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     instance.name, pnode, [snode],
                                     disk_info, None, None, 0, feedback_fn,
                                     self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
    info = GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode, instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
          f_create = node == pnode
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
                               excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance

    assert len(instance.secondary_nodes) == 1
    assert instance.disk_template == constants.DT_DRBD8

    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
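    # each DRBD disk consists of a (data LV, meta LV) pair; the data LV is
    # reused as the new plain disk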
    new_disks = [d.children[0] for d in instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, instance.disks)
    self.cfg.Update(instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    instance = self.instance

    # add a new disk
    if instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
                           instance.primary_node, instance.secondary_nodes,
                           [params], file_path, file_driver, idx,
                           self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
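    # remove the disk and all of its components from every node where they
    # reside; failures are only logged so that the operation can continue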
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
      self.cfg.SetDiskID(disk, node)
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx, node, msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []
    instance = self.instance

    # New primary node
    if self.op.pnode:
      instance.primary_node = self.op.pnode

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
                                                     instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, instance.disks)

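    # Disk template conversion: the instance was verified to be down in
    # CheckPrereq, its disks are shut down here, and the matching helper from
    # _DISK_CONVERSIONS performs the actual conversion.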
    if self.op.disk_template:
      if __debug__:
        check_nodes = set(instance.all_nodes)
        if self.op.remote_node:
          check_nodes.add(self.op.remote_node)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

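  # maps a (current template, requested template) pair to the method that
  # implements the conversion; used by _PreCheckDiskTemplate and Exec above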
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
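  """Change the node group of an instance.

  """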
  HPATH = "instance-change-group"
3336
  HTYPE = constants.HTYPE_INSTANCE
3337
  REQ_BGL = False
3338

    
3339
  def ExpandNames(self):
3340
    self.share_locks = ShareAll()
3341

    
3342
    self.needed_locks = {
3343
      locking.LEVEL_NODEGROUP: [],
3344
      locking.LEVEL_NODE: [],
3345
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3346
      }
3347

    
3348
    self._ExpandAndLockInstance()
3349

    
3350
    if self.op.target_groups:
3351
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3352
                                  self.op.target_groups)
3353
    else:
3354
      self.req_target_uuids = None
3355

    
3356
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3357

    
3358
  def DeclareLocks(self, level):
3359
    if level == locking.LEVEL_NODEGROUP:
3360
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3361

    
3362
      if self.req_target_uuids:
3363
        lock_groups = set(self.req_target_uuids)
3364

    
3365
        # Lock all groups used by instance optimistically; this requires going
3366
        # via the node before it's locked, requiring verification later on
3367
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3368
        lock_groups.update(instance_groups)
3369
      else:
3370
        # No target groups, need to lock all of them
3371
        lock_groups = locking.ALL_SET
3372

    
3373
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3374

    
3375
    elif level == locking.LEVEL_NODE:
3376
      if self.req_target_uuids:
3377
        # Lock all nodes used by instances
3378
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3379
        self._LockInstancesNodes()
3380

    
3381
        # Lock all nodes in all potential target groups
3382
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3383
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3384
        member_nodes = [node_name
3385
                        for group in lock_groups
3386
                        for node_name in self.cfg.GetNodeGroup(group).members]
3387
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3388
      else:
3389
        # Lock all nodes as all groups are potential targets
3390
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3391

    
3392
  def CheckPrereq(self):
3393
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3394
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3395
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3396

    
3397
    assert (self.req_target_uuids is None or
3398
            owned_groups.issuperset(self.req_target_uuids))
3399
    assert owned_instances == set([self.op.instance_name])
3400

    
3401
    # Get instance information
3402
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3403

    
3404
    # Check if node groups for locked instance are still correct
3405
    assert owned_nodes.issuperset(self.instance.all_nodes), \
3406
      ("Instance %s's nodes changed while we kept the lock" %
3407
       self.op.instance_name)
3408

    
3409
    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3410
                                          owned_groups)
3411

    
3412
    if self.req_target_uuids:
3413
      # User requested specific target groups
3414
      self.target_uuids = frozenset(self.req_target_uuids)
3415
    else:
3416
      # All groups except those used by the instance are potential targets
3417
      self.target_uuids = owned_groups - inst_groups
3418

    
3419
    conflicting_groups = self.target_uuids & inst_groups
3420
    if conflicting_groups:
3421
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3422
                                 " used by the instance '%s'" %
3423
                                 (utils.CommaJoin(conflicting_groups),
3424
                                  self.op.instance_name),
3425
                                 errors.ECODE_INVAL)
3426

    
3427
    if not self.target_uuids:
3428
      raise errors.OpPrereqError("There are no possible target groups",
3429
                                 errors.ECODE_INVAL)
3430

    
3431
  def BuildHooksEnv(self):
3432
    """Build hooks env.
3433

3434
    """
3435
    assert self.target_uuids
3436

    
3437
    env = {
3438
      "TARGET_GROUPS": " ".join(self.target_uuids),
3439
      }
3440

    
3441
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
3442

    
3443
    return env
3444

    
3445
  def BuildHooksNodes(self):
3446
    """Build hooks nodes.
3447

3448
    """
3449
    mn = self.cfg.GetMasterNode()
3450
    return ([mn], [mn])
3451

    
3452
  def Exec(self, feedback_fn):
3453
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3454

    
3455
    assert instances == [self.op.instance_name], "Instance not locked"
3456

    
3457
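    # Ask the instance allocator for a solution; its result is converted into
    # regular jobs that perform the actual moves.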
    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)