#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import serializer
import ganeti.rpc.node as rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, CheckOSImage, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, ImageDisks, \
  WaitForSync, IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, \
  ComputeDisks, CheckRADOSFreeSpace, ComputeDiskSizePerVG, \
  GenerateDiskTemplate, StartInstanceDisks, ShutdownInstanceDisks, \
  AssembleInstanceDisks, CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, UpdateMetadata
import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
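
# Informal example (not part of the original source): a request for "inst1"
# that resolves to "inst1.example.com" passes the prefix check above, while
# "inst1" resolving to "web1.example.com" would raise OpPrereqError, since
# the given name is not a prefix of the resolved one.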


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)
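
# Informal note: the request built here is consumed by LUInstanceCreate's
# _RunAllocator below, which wraps it in iallocator.IAllocator(...) and calls
# ial.Run(self.op.iallocator) to pick the primary (and, for mirrored
# templates, the secondary) node.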


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
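
# Informal example (illustrative values only): an opcode carrying
# beparams={"vcpus": "auto"} has "auto" replaced by the cluster default for
# that parameter, the dict is upgraded to the maxmem/minmem layout by
# objects.UpgradeBeParams, and SimpleFillBE then fills in every remaining
# backend parameter from the cluster defaults.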


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built-up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
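
# Informal example (illustrative): a request NIC of {"ip": "pool",
# "network": "mynet"} is accepted here with its IP left as the literal pool
# marker; the actual address is only taken from the network's pool later, in
# LUInstanceCreate.CheckPrereq, once the primary node (and thus the node
# group's netparams) is known. Passing "mode" or "link" together with a
# network is rejected above.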


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if the instance spec meets the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _ComputeInstanceCommunicationNIC(instance_name):
  """Compute the name of the instance NIC used by instance
  communication.

  With instance communication, a new NIC is added to the instance.
  This NIC has a special name that identifies it as being part of
  instance communication, and not just a normal NIC.  This function
  generates the name of the NIC based on a prefix and the instance
  name.

  @type instance_name: string
  @param instance_name: name of the instance the NIC belongs to

  @rtype: string
  @return: name of the NIC

  """
  return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name
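
# Informal example: for an instance named "inst1.example.com" this returns
# the instance-communication prefix constant with the name appended, i.e.
# "<prefix>inst1.example.com"; the exact prefix value is defined by
# constants.INSTANCE_COMMUNICATION_NIC_PREFIX.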


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """Check validity of VLANs, if given.

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # add NIC for instance communication
    if self.op.instance_communication:
      nic_name = _ComputeInstanceCommunicationNIC(self.op.instance_name)

      self.op.nics.append({constants.INIC_NAME: nic_name,
                           constants.INIC_MAC: constants.VALUE_GENERATE,
                           constants.INIC_IP: constants.NIC_IP_POOL,
                           constants.INIC_NETWORK:
                             self.cfg.GetInstanceCommunicationNetwork()})

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

      if objects.GetOSImage(self.op.osparams):
        self.LogInfo("OS image has no effect during import")
    elif self.op.mode == constants.INSTANCE_CREATE:
      os_image = CheckOSImage(self.op)

      if self.op.os_type is None and os_image is None:
        raise errors.OpPrereqError("No guest OS or OS image specified",
                                   errors.ECODE_INVAL)

      if self.op.os_type is not None \
            and self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      if objects.GetOSImage(self.op.osparams):
        self.LogInfo("OS image has no effect during import")

      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice but to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
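
  # Informal note: at this point the lock sets follow a simple pattern: with
  # an iallocator (or an import without a source node) all node locks are
  # requested, possibly opportunistically, while an explicit pnode/snode
  # request only locks those nodes plus, for imports, the source node;
  # LEVEL_NODE_RES always mirrors LEVEL_NODE and node groups are shared.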

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        set(self.owned_locks(locking.LEVEL_NODE)) &
        set(self.owned_locks(locking.LEVEL_NODE_RES)))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
          disk = {
            constants.IDISK_SIZE: disk_sz,
            constants.IDISK_NAME: disk_name
            }
          disks.append(disk)
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in [constants.INIC_IP,
                       constants.INIC_MAC, constants.INIC_NAME]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          network = einfo.get(constants.INISECT_INS,
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
          # in case network is given link and mode are inherited
          # from nodegroup's netparams and thus should not be passed here
          if network:
            ndict[constants.INIC_NETWORK] = network
          else:
            for name in list(constants.NICS_PARAMETERS):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
        if name not in self.op.osparams_private:
          self.op.osparams_private[name] = serializer.Private(value, descr=name)
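
  # Informal example (illustrative option names follow the patterns used
  # above): an export whose instance section contains disk0_size, disk0_name
  # and nic0_mac yields default disk and NIC definitions here, while values
  # already present on the opcode (hvparams, beparams, osparams, tags, the
  # hypervisor) are never overridden by the export.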

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
                                    os_params_private={})
    for name in self.op.osparams_private.keys():
      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
        del self.op.osparams_private[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      cfg_storage = None
      if self.op.disk_template == constants.DT_FILE:
        cfg_storage = self.cfg.GetFileStorageDir()
      elif self.op.disk_template == constants.DT_SHARED_FILE:
        cfg_storage = self.cfg.GetSharedFileStorageDir()
      elif self.op.disk_template == constants.DT_GLUSTER:
        cfg_storage = self.cfg.GetGlusterStorageDir()

      if not cfg_storage:
        raise errors.OpPrereqError(
          "Cluster file storage dir for {tpl} storage type not defined".format(
            tpl=repr(self.op.disk_template)
          ),
          errors.ECODE_STATE
        )

      joinargs.append(cfg_storage)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      if self.op.disk_template != constants.DT_GLUSTER:
        joinargs.append(self.op.instance_name)

      if len(joinargs) > 1:
        # pylint: disable=W0142
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
      else:
        self.instance_file_storage_dir = joinargs[0]
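
  # Informal example (paths are illustrative): for a file-based template with
  # a cluster storage dir of "/srv/ganeti/file-storage", an opcode
  # file_storage_dir of "web" and an instance named "inst1.example.com", the
  # result is "/srv/ganeti/file-storage/web/inst1.example.com"; for Gluster
  # the instance name component is omitted.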

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups"
                                 " are '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)
960

    
961
    self._CalculateFileStorageDir()
962

    
963
    if self.op.mode == constants.INSTANCE_IMPORT:
964
      export_info = self._ReadExportInfo()
965
      self._ReadExportParams(export_info)
966
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
967
    else:
968
      self._old_instance_name = None
969

    
970
    if (not self.cfg.GetVGName() and
971
        self.op.disk_template not in constants.DTS_NOT_LVM):
972
      raise errors.OpPrereqError("Cluster does not support lvm-based"
973
                                 " instances", errors.ECODE_STATE)
974

    
975
    if (self.op.hypervisor is None or
976
        self.op.hypervisor == constants.VALUE_AUTO):
977
      self.op.hypervisor = self.cfg.GetHypervisorType()
978

    
979
    cluster = self.cfg.GetClusterInfo()
980
    enabled_hvs = cluster.enabled_hypervisors
981
    if self.op.hypervisor not in enabled_hvs:
982
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
983
                                 " cluster (%s)" %
984
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
985
                                 errors.ECODE_STATE)
986

    
987
    # Check tag validity
988
    for tag in self.op.tags:
989
      objects.TaggableObject.ValidateTag(tag)
990

    
991
    # check hypervisor parameter syntax (locally)
992
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
993
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
994
                                      self.op.hvparams)
995
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
996
    hv_type.CheckParameterSyntax(filled_hvp)
997
    self.hv_full = filled_hvp
998
    # check that we don't specify global parameters on an instance
999
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
1000
                         "instance", "cluster")
1001

    
1002
    # fill and remember the beparams dict
1003
    self.be_full = _ComputeFullBeParams(self.op, cluster)
1004

    
1005
    # build os parameters
1006
    if self.op.osparams_private is None:
1007
      self.op.osparams_private = serializer.PrivateDict()
1008
    if self.op.osparams_secret is None:
1009
      self.op.osparams_secret = serializer.PrivateDict()
1010

    
1011
    self.os_full = cluster.SimpleFillOS(
1012
      self.op.os_type,
1013
      self.op.osparams,
1014
      os_params_private=self.op.osparams_private,
1015
      os_params_secret=self.op.osparams_secret
1016
    )
1017

    
1018
    # now that hvp/bep are in final format, let's reset to defaults,
1019
    # if told to do so
1020
    if self.op.identify_defaults:
1021
      self._RevertToDefaults(cluster)
1022

    
1023
    # NIC buildup
1024
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
1025
                             self.proc.GetECId())
1026

    
1027
    # disk checks/pre-build
1028
    default_vg = self.cfg.GetVGName()
1029
    self.disks = ComputeDisks(self.op, default_vg)
1030

    
1031
    if self.op.mode == constants.INSTANCE_IMPORT:
1032
      disk_images = []
1033
      for idx in range(len(self.disks)):
1034
        option = "disk%d_dump" % idx
1035
        if export_info.has_option(constants.INISECT_INS, option):
1036
          # FIXME: are the old os-es, disk sizes, etc. useful?
1037
          export_name = export_info.get(constants.INISECT_INS, option)
1038
          image = utils.PathJoin(self.op.src_path, export_name)
1039
          disk_images.append(image)
1040
        else:
1041
          disk_images.append(False)
1042

    
1043
      self.src_images = disk_images
1044

    
1045
      if self.op.instance_name == self._old_instance_name:
1046
        for idx, nic in enumerate(self.nics):
1047
          if nic.mac == constants.VALUE_AUTO:
1048
            nic_mac_ini = "nic%d_mac" % idx
1049
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
1050

    
1051
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
1052

    
1053
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
1054
    if self.op.ip_check:
1055
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
1056
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1057
                                   (self.check_ip, self.op.instance_name),
1058
                                   errors.ECODE_NOTUNIQUE)
1059

    
1060
    #### mac address generation
1061
    # By generating here the mac address both the allocator and the hooks get
1062
    # the real final mac address rather than the 'auto' or 'generate' value.
1063
    # There is a race condition between the generation and the instance object
1064
    # creation, which means that we know the mac is valid now, but we're not
1065
    # sure it will be when we actually add the instance. If things go bad
1066
    # adding the instance will abort because of a duplicate mac, and the
1067
    # creation job will fail.
1068
    for nic in self.nics:
1069
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
1070
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
1071

    
1072
    #### allocator run
1073

    
1074
    if self.op.iallocator is not None:
1075
      self._RunAllocator()
1076

    
1077
    # Release all unneeded node locks
1078
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
1079
                               self.op.src_node_uuid])
1080
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
1081
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
1082
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
1083
    # Release all unneeded group locks
1084
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
1085
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
1086

    
1087
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1088
            self.owned_locks(locking.LEVEL_NODE_RES)), \
1089
      "Node locks differ from node resource locks"
1090

    
1091
    #### node related checks
1092

    
1093
    # check primary node
1094
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1095
    assert self.pnode is not None, \
1096
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
1097
    if pnode.offline:
1098
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1099
                                 pnode.name, errors.ECODE_STATE)
1100
    if pnode.drained:
1101
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1102
                                 pnode.name, errors.ECODE_STATE)
1103
    if not pnode.vm_capable:
1104
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1105
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
1106

    
1107
    self.secondaries = []
1108

    
1109
    # Fill in any IPs from IP pools. This must happen here, because we need to
1110
    # know the nic's primary node, as specified by the iallocator
1111
    for idx, nic in enumerate(self.nics):
1112
      net_uuid = nic.network
1113
      if net_uuid is not None:
1114
        nobj = self.cfg.GetNetwork(net_uuid)
1115
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
1116
        if netparams is None:
1117
          raise errors.OpPrereqError("No netparams found for network"
1118
                                     " %s. Probably not connected to"
1119
                                     " node's %s nodegroup" %
1120
                                     (nobj.name, self.pnode.name),
1121
                                     errors.ECODE_INVAL)
1122
        self.LogInfo("NIC/%d inherits netparams %s" %
1123
                     (idx, netparams.values()))
1124
        nic.nicparams = dict(netparams)
1125
        if nic.ip is not None:
1126
          if nic.ip.lower() == constants.NIC_IP_POOL:
1127
            try:
1128
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1129
            except errors.ReservationError:
1130
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1131
                                         " from the address pool" % idx,
1132
                                         errors.ECODE_STATE)
1133
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1134
          else:
1135
            try:
1136
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
1137
                                 check=self.op.conflicts_check)
1138
            except errors.ReservationError:
1139
              raise errors.OpPrereqError("IP address %s already in use"
1140
                                         " or does not belong to network %s" %
1141
                                         (nic.ip, nobj.name),
1142
                                         errors.ECODE_NOTUNIQUE)
1143

    
1144
      # net is None, ip None or given
1145
      elif self.op.conflicts_check:
1146
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1147

    
1148
    # mirror node verification
1149
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1150
      if self.op.snode_uuid == pnode.uuid:
1151
        raise errors.OpPrereqError("The secondary node cannot be the"
1152
                                   " primary node", errors.ECODE_INVAL)
1153
      CheckNodeOnline(self, self.op.snode_uuid)
1154
      CheckNodeNotDrained(self, self.op.snode_uuid)
1155
      CheckNodeVmCapable(self, self.op.snode_uuid)
1156
      self.secondaries.append(self.op.snode_uuid)
1157

    
1158
      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1159
      if pnode.group != snode.group:
1160
        self.LogWarning("The primary and secondary nodes are in two"
1161
                        " different node groups; the disk parameters"
1162
                        " from the first disk's node group will be"
1163
                        " used")
1164

    
1165
    nodes = [pnode]
1166
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1167
      nodes.append(snode)
1168
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1169
    excl_stor = compat.any(map(has_es, nodes))
1170
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1171
      raise errors.OpPrereqError("Disk template %s not supported with"
1172
                                 " exclusive storage" % self.op.disk_template,
1173
                                 errors.ECODE_STATE)
1174
    for disk in self.disks:
1175
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1176

    
1177
    node_uuids = [pnode.uuid] + self.secondaries
1178

    
1179
    if not self.adopt_disks:
1180
      if self.op.disk_template == constants.DT_RBD:
1181
        # _CheckRADOSFreeSpace() is just a placeholder.
1182
        # Any function that checks prerequisites can be placed here.
1183
        # Check if there is enough space on the RADOS cluster.
1184
        CheckRADOSFreeSpace()
1185
      elif self.op.disk_template == constants.DT_EXT:
1186
        # FIXME: Function that checks prereqs if needed
1187
        pass
1188
      elif self.op.disk_template in constants.DTS_LVM:
1189
        # Check lv size requirements, if not adopting
1190
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1191
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1192
      else:
1193
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1194
        pass
1195

    
1196
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1197
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1198
                                disk[constants.IDISK_ADOPT])
1199
                     for disk in self.disks])
1200
      if len(all_lvs) != len(self.disks):
1201
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1202
                                   errors.ECODE_INVAL)
1203
      for lv_name in all_lvs:
1204
        try:
1205
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1206
          # to ReserveLV uses the same syntax
1207
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1208
        except errors.ReservationError:
1209
          raise errors.OpPrereqError("LV named %s used by another instance" %
1210
                                     lv_name, errors.ECODE_NOTUNIQUE)
1211

    
1212
      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1213
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1214

    
1215
      node_lvs = self.rpc.call_lv_list([pnode.uuid],
1216
                                       vg_names.payload.keys())[pnode.uuid]
1217
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1218
      node_lvs = node_lvs.payload
1219

    
1220
      delta = all_lvs.difference(node_lvs.keys())
1221
      if delta:
1222
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1223
                                   utils.CommaJoin(delta),
1224
                                   errors.ECODE_INVAL)
1225
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1226
      if online_lvs:
1227
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1228
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1229
                                   errors.ECODE_STATE)
1230
      # update the size of disk based on what is found
1231
      for dsk in self.disks:
1232
        dsk[constants.IDISK_SIZE] = \
1233
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1234
                                        dsk[constants.IDISK_ADOPT])][0]))
1235

    
1236
    elif self.op.disk_template == constants.DT_BLOCK:
1237
      # Normalize and de-duplicate device paths
1238
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1239
                       for disk in self.disks])
1240
      if len(all_disks) != len(self.disks):
1241
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1242
                                   errors.ECODE_INVAL)
1243
      baddisks = [d for d in all_disks
1244
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1245
      if baddisks:
1246
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1247
                                   " cannot be adopted" %
1248
                                   (utils.CommaJoin(baddisks),
1249
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1250
                                   errors.ECODE_INVAL)
1251

    
1252
      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1253
                                            list(all_disks))[pnode.uuid]
1254
      node_disks.Raise("Cannot get block device information from node %s" %
1255
                       pnode.name)
1256
      node_disks = node_disks.payload
1257
      delta = all_disks.difference(node_disks.keys())
1258
      if delta:
1259
        raise errors.OpPrereqError("Missing block device(s): %s" %
1260
                                   utils.CommaJoin(delta),
1261
                                   errors.ECODE_INVAL)
1262
      for dsk in self.disks:
1263
        dsk[constants.IDISK_SIZE] = \
1264
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1265

    
1266
    # Check that the disk access parameter is compatible with the hypervisor
1267
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1268
    node_group = self.cfg.GetNodeGroup(node_info.group)
1269
    disk_params = self.cfg.GetGroupDiskParams(node_group)
1270
    access_type = disk_params[self.op.disk_template].get(
1271
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
1272
    )
1273

    
1274
    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
1275
                                            self.op.disk_template,
1276
                                            access_type):
1277
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
1278
                                 " used with %s disk access param" %
1279
                                 (self.op.hypervisor, access_type),
1280
                                  errors.ECODE_STATE)
1281

    
1282
    # Verify instance specs
1283
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1284
    ispec = {
1285
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1286
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1287
      constants.ISPEC_DISK_COUNT: len(self.disks),
1288
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1289
                                  for disk in self.disks],
1290
      constants.ISPEC_NIC_COUNT: len(self.nics),
1291
      constants.ISPEC_SPINDLE_USE: spindle_use,
1292
      }
1293

    
1294
    group_info = self.cfg.GetNodeGroup(pnode.group)
1295
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1296
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1297
                                               self.op.disk_template)
1298
    if not self.op.ignore_ipolicy and res:
1299
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1300
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1301
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1302

    
1303
    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1304

    
1305
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full,
1306
                  self.op.force_variant)
1307

    
1308
    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1309

    
1310
    #TODO: _CheckExtParams (remotely)
1311
    # Check parameters for extstorage
1312

    
1313
    # memory check on primary node
1314
    #TODO(dynmem): use MINMEM for checking
1315
    if self.op.start:
1316
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1317
                                self.op.hvparams)
1318
      CheckNodeFreeMemory(self, self.pnode.uuid,
1319
                          "creating instance %s" % self.op.instance_name,
1320
                          self.be_full[constants.BE_MAXMEM],
1321
                          self.op.hypervisor, hvfull)
1322

    
1323
    self.dry_run_result = list(node_uuids)
1324

    
1325
  def _RemoveDegradedDisks(self, feedback_fn, disk_abort, instance):
1326
    """Removes degraded disks and instance.
1327

1328
    It optionally checks whether disks are degraded.  If the disks are
1329
    degraded, they are removed and the instance is also removed from
1330
    the configuration.
1331

1332
    If L{disk_abort} is True, then the disks are considered degraded
1333
    and removed, and the instance is removed from the configuration.
1334

1335
    If L{disk_abort} is False, then it first checks whether disks are
1336
    degraded and, if so, it removes the disks and the instance is
1337
    removed from the configuration.
1338

1339
    @type feedback_fn: callable
1340
    @param feedback_fn: function used to send feedback back to the caller
1341

1342
    @type disk_abort: boolean
1343
    @param disk_abort:
1344
      True if disks are degraded, False to first check if disks are
1345
      degraded
1346

1347
    @type instance: L{objects.Instance}
1348
    @param instance: instance containing the disks to check
1349

1350
    @rtype: NoneType
1351
    @return: None
1352
    @raise errors.OpExecError: if disks are degraded
1353

1354
    """
1355
    if disk_abort:
1356
      pass
1357
    elif self.op.wait_for_sync:
1358
      disk_abort = not WaitForSync(self, instance)
1359
    elif instance.disk_template in constants.DTS_INT_MIRROR:
1360
      # make sure the disks are not degraded (still sync-ing is ok)
1361
      feedback_fn("* checking mirrors status")
1362
      disk_abort = not WaitForSync(self, instance, oneshot=True)
1363
    else:
1364
      disk_abort = False
1365

    
1366
    if disk_abort:
1367
      RemoveDisks(self, instance)
1368
      self.cfg.RemoveInstance(instance.uuid)
1369
      # Make sure the instance lock gets removed
1370
      self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
1371
      raise errors.OpExecError("There are some degraded disks for"
1372
                               " this instance")
1373

    
1374
  def Exec(self, feedback_fn):
1375
    """Create and add the instance to the cluster.
1376

1377
    """
1378
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1379
                self.owned_locks(locking.LEVEL_NODE)), \
1380
      "Node locks differ from node resource locks"
1381

    
1382
    ht_kind = self.op.hypervisor
1383
    if ht_kind in constants.HTS_REQ_PORT:
1384
      network_port = self.cfg.AllocatePort()
1385
    else:
1386
      network_port = None
1387

    
1388
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1389

    
1390
    # This is ugly, but we have a chicken-and-egg problem here
1391
    # We can only take the group disk parameters, as the instance
1392
    # has no disks yet (we are generating them right here).
1393
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1394
    disks = GenerateDiskTemplate(self,
1395
                                 self.op.disk_template,
1396
                                 instance_uuid, self.pnode.uuid,
1397
                                 self.secondaries,
1398
                                 self.disks,
1399
                                 self.instance_file_storage_dir,
1400
                                 self.op.file_driver,
1401
                                 0,
1402
                                 feedback_fn,
1403
                                 self.cfg.GetGroupDiskParams(nodegroup))
1404

    
1405
    if self.op.os_type is None:
1406
      os_type = ""
1407
    else:
1408
      os_type = self.op.os_type
1409

    
1410
    iobj = objects.Instance(name=self.op.instance_name,
1411
                            uuid=instance_uuid,
1412
                            os=os_type,
1413
                            primary_node=self.pnode.uuid,
1414
                            nics=self.nics, disks=disks,
1415
                            disk_template=self.op.disk_template,
1416
                            disks_active=False,
1417
                            admin_state=constants.ADMINST_DOWN,
1418
                            network_port=network_port,
1419
                            beparams=self.op.beparams,
1420
                            hvparams=self.op.hvparams,
1421
                            hypervisor=self.op.hypervisor,
1422
                            osparams=self.op.osparams,
1423
                            osparams_private=self.op.osparams_private,
1424
                            )
1425

    
1426
    if self.op.tags:
1427
      for tag in self.op.tags:
1428
        iobj.AddTag(tag)
1429

    
1430
    if self.adopt_disks:
1431
      if self.op.disk_template == constants.DT_PLAIN:
1432
        # rename LVs to the newly-generated names; we need to construct
1433
        # 'fake' LV disks with the old data, plus the new unique_id
1434
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1435
        rename_to = []
1436
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1437
          rename_to.append(t_dsk.logical_id)
1438
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1439
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1440
                                               zip(tmp_disks, rename_to))
1441
        result.Raise("Failed to rename adoped LVs")
1442
    else:
1443
      feedback_fn("* creating instance disks...")
1444
      try:
1445
        CreateDisks(self, iobj)
1446
      except errors.OpExecError:
1447
        self.LogWarning("Device creation failed")
1448
        self.cfg.ReleaseDRBDMinors(instance_uuid)
1449
        raise
1450

    
1451
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1452

    
1453
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1454

    
1455
    # Declare that we don't want to remove the instance lock anymore, as we've
1456
    # added the instance to the config
1457
    del self.remove_locks[locking.LEVEL_INSTANCE]
1458

    
1459
    if self.op.mode == constants.INSTANCE_IMPORT:
1460
      # Release unused nodes
1461
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1462
    else:
1463
      # Release all nodes
1464
      ReleaseLocks(self, locking.LEVEL_NODE)
1465

    
1466
    # Wipe disks
1467
    disk_abort = False
1468
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1469
      feedback_fn("* wiping instance disks...")
1470
      try:
1471
        WipeDisks(self, iobj)
1472
      except errors.OpExecError, err:
1473
        logging.exception("Wiping disks failed")
1474
        self.LogWarning("Wiping instance disks failed (%s)", err)
1475
        disk_abort = True
1476

    
1477
    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
1478

    
1479
    # Image disks
1480
    os_image = objects.GetOSImage(iobj.osparams)
1481
    disk_abort = False
1482

    
1483
    if not self.adopt_disks and os_image is not None:
1484
      feedback_fn("* imaging instance disks...")
1485
      try:
1486
        ImageDisks(self, iobj, os_image)
1487
      except errors.OpExecError, err:
1488
        logging.exception("Imaging disks failed")
1489
        self.LogWarning("Imaging instance disks failed (%s)", err)
1490
        disk_abort = True
1491

    
1492
    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
1493

    
1494
    # instance disks are now active
1495
    iobj.disks_active = True
1496

    
1497
    # Release all node resource locks
1498
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1499

    
1500
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1501
      if self.op.mode == constants.INSTANCE_CREATE:
1502
        os_image = objects.GetOSImage(self.op.osparams)
1503

    
1504
        if os_image is None and not self.op.no_install:
1505
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1506
                        not self.op.wait_for_sync)
1507
          if pause_sync:
1508
            feedback_fn("* pausing disk sync to install instance OS")
1509
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1510
                                                              (iobj.disks,
1511
                                                               iobj), True)
1512
            for idx, success in enumerate(result.payload):
1513
              if not success:
1514
                logging.warn("pause-sync of instance %s for disk %d failed",
1515
                             self.op.instance_name, idx)
1516

    
1517
          feedback_fn("* running the instance OS create scripts...")
1518
          # FIXME: pass debug option from opcode to backend
1519
          os_add_result = \
1520
            self.rpc.call_instance_os_add(self.pnode.uuid,
1521
                                          (iobj, self.op.osparams_secret),
1522
                                          False,
1523
                                          self.op.debug_level)
1524
          if pause_sync:
1525
            feedback_fn("* resuming disk sync")
1526
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1527
                                                              (iobj.disks,
1528
                                                               iobj), False)
1529
            for idx, success in enumerate(result.payload):
1530
              if not success:
1531
                logging.warn("resume-sync of instance %s for disk %d failed",
1532
                             self.op.instance_name, idx)
1533

    
1534
          os_add_result.Raise("Could not add os for instance %s"
1535
                              " on node %s" % (self.op.instance_name,
1536
                                               self.pnode.name))
1537

    
1538
      else:
1539
        if self.op.mode == constants.INSTANCE_IMPORT:
1540
          feedback_fn("* running the instance OS import scripts...")
1541

    
1542
          transfers = []
1543

    
1544
          for idx, image in enumerate(self.src_images):
1545
            if not image:
1546
              continue
1547

    
1548
            if iobj.os:
1549
              dst_io = constants.IEIO_SCRIPT
1550
              dst_ioargs = ((iobj.disks[idx], iobj), idx)
1551
            else:
1552
              dst_io = constants.IEIO_RAW_DISK
1553
              dst_ioargs = (iobj.disks[idx], iobj)
1554

    
1555
            # FIXME: pass debug option from opcode to backend
1556
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1557
                                               constants.IEIO_FILE, (image, ),
1558
                                               dst_io, dst_ioargs,
1559
                                               None)
1560
            transfers.append(dt)
1561

    
1562
          import_result = \
1563
            masterd.instance.TransferInstanceData(self, feedback_fn,
1564
                                                  self.op.src_node_uuid,
1565
                                                  self.pnode.uuid,
1566
                                                  self.pnode.secondary_ip,
1567
                                                  self.op.compress,
1568
                                                  iobj, transfers)
1569
          if not compat.all(import_result):
1570
            self.LogWarning("Some disks for instance %s on node %s were not"
1571
                            " imported successfully" % (self.op.instance_name,
1572
                                                        self.pnode.name))
1573

    
1574
          rename_from = self._old_instance_name
1575

    
1576
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1577
          feedback_fn("* preparing remote import...")
1578
          # The source cluster will stop the instance before attempting to make
1579
          # a connection. In some cases stopping an instance can take a long
1580
          # time, hence the shutdown timeout is added to the connection
1581
          # timeout.
1582
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1583
                             self.op.source_shutdown_timeout)
1584
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1585

    
1586
          assert iobj.primary_node == self.pnode.uuid
1587
          disk_results = \
1588
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1589
                                          self.source_x509_ca,
1590
                                          self._cds, self.op.compress, timeouts)
1591
          if not compat.all(disk_results):
1592
            # TODO: Should the instance still be started, even if some disks
1593
            # failed to import (valid for local imports, too)?
1594
            self.LogWarning("Some disks for instance %s on node %s were not"
1595
                            " imported successfully" % (self.op.instance_name,
1596
                                                        self.pnode.name))
1597

    
1598
          rename_from = self.source_instance_name
1599

    
1600
        else:
1601
          # also checked in the prereq part
1602
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1603
                                       % self.op.mode)
1604

    
1605
        assert iobj.name == self.op.instance_name
1606

    
1607
        # Run rename script on newly imported instance
1608
        if iobj.os:
1609
          feedback_fn("Running rename script for %s" % self.op.instance_name)
1610
          result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1611
                                                     rename_from,
1612
                                                     self.op.debug_level)
1613
          result.Warn("Failed to run rename script for %s on node %s" %
1614
                      (self.op.instance_name, self.pnode.name), self.LogWarning)
1615

    
1616
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1617

    
1618
    if self.op.start:
1619
      iobj.admin_state = constants.ADMINST_UP
1620
      self.cfg.Update(iobj, feedback_fn)
1621
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1622
                   self.pnode.name)
1623
      feedback_fn("* starting instance...")
1624
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1625
                                            False, self.op.reason)
1626
      result.Raise("Could not start instance")
1627

    
1628
    UpdateMetadata(feedback_fn, self.rpc, iobj,
1629
                   osparams_private=self.op.osparams_private,
1630
                   osparams_secret=self.op.osparams_secret)
1631

    
1632
    return self.cfg.GetNodeNames(list(iobj.all_nodes))


class LUInstanceRename(LogicalUnit):
1636
  """Rename an instance.
1637

1638
  """
1639
  HPATH = "instance-rename"
1640
  HTYPE = constants.HTYPE_INSTANCE
1641

    
1642
  def CheckArguments(self):
1643
    """Check arguments.
1644

1645
    """
1646
    if self.op.ip_check and not self.op.name_check:
1647
      # TODO: make the ip check more flexible and not depend on the name check
1648
      raise errors.OpPrereqError("IP address check requires a name check",
1649
                                 errors.ECODE_INVAL)
1650

    
1651
  def BuildHooksEnv(self):
1652
    """Build hooks env.
1653

1654
    This runs on master, primary and secondary nodes of the instance.
1655

1656
    """
1657
    env = BuildInstanceHookEnvByObject(self, self.instance)
1658
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1659
    return env
1660

    
1661
  def BuildHooksNodes(self):
1662
    """Build hooks nodes.
1663

1664
    """
1665
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1666
    return (nl, nl)
1667

    
1668
  def CheckPrereq(self):
1669
    """Check prerequisites.
1670

1671
    This checks that the instance is in the cluster and is not running.
1672

1673
    """
1674
    (self.op.instance_uuid, self.op.instance_name) = \
1675
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1676
                                self.op.instance_name)
1677
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1678
    assert instance is not None
1679

    
1680
    # It should actually not happen that an instance is running with a disabled
1681
    # disk template, but in case it does, the renaming of file-based instances
1682
    # will fail horribly. Thus, we test it before.
1683
    if (instance.disk_template in constants.DTS_FILEBASED and
1684
        self.op.new_name != instance.name):
1685
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1686
                               instance.disk_template)
1687

    
1688
    CheckNodeOnline(self, instance.primary_node)
1689
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1690
                       msg="cannot rename")
1691
    self.instance = instance
1692

    
1693
    new_name = self.op.new_name
1694
    if self.op.name_check:
1695
      hostname = _CheckHostnameSane(self, new_name)
1696
      new_name = self.op.new_name = hostname.name
1697
      if (self.op.ip_check and
1698
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1699
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1700
                                   (hostname.ip, new_name),
1701
                                   errors.ECODE_NOTUNIQUE)
1702

    
1703
    instance_names = [inst.name for
1704
                      inst in self.cfg.GetAllInstancesInfo().values()]
1705
    if new_name in instance_names and new_name != instance.name:
1706
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1707
                                 new_name, errors.ECODE_EXISTS)
1708

    
1709
  def Exec(self, feedback_fn):
1710
    """Rename the instance.
1711

1712
    """
1713
    old_name = self.instance.name
1714

    
1715
    rename_file_storage = False
1716
    if (self.instance.disk_template in (constants.DT_FILE,
1717
                                        constants.DT_SHARED_FILE) and
1718
        self.op.new_name != self.instance.name):
1719
      old_file_storage_dir = os.path.dirname(
1720
                               self.instance.disks[0].logical_id[1])
1721
      rename_file_storage = True
1722

    
1723
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1724
    # Change the instance lock. This is definitely safe while we hold the BGL.
1725
    # Otherwise the new lock would have to be added in acquired mode.
1726
    assert self.REQ_BGL
1727
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1728

    
1729
    # re-read the instance from the configuration after rename
1730
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1731

    
1732
    if rename_file_storage:
1733
      new_file_storage_dir = os.path.dirname(
1734
                               renamed_inst.disks[0].logical_id[1])
1735
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1736
                                                     old_file_storage_dir,
1737
                                                     new_file_storage_dir)
1738
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1739
                   " (but the instance has been renamed in Ganeti)" %
1740
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1741
                    old_file_storage_dir, new_file_storage_dir))
1742

    
1743
    StartInstanceDisks(self, renamed_inst, None)
1744
    renamed_inst = self.cfg.GetInstanceInfo(renamed_inst.uuid)
1745

    
1746
    # update info on disks
1747
    info = GetInstanceInfoText(renamed_inst)
1748
    for (idx, disk) in enumerate(renamed_inst.disks):
1749
      for node_uuid in renamed_inst.all_nodes:
1750
        result = self.rpc.call_blockdev_setinfo(node_uuid,
1751
                                                (disk, renamed_inst), info)
1752
        result.Warn("Error setting info on node %s for disk %s" %
1753
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1754
    try:
1755
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1756
                                                 renamed_inst, old_name,
1757
                                                 self.op.debug_level)
1758
      result.Warn("Could not run OS rename script for instance %s on node %s"
1759
                  " (but the instance has been renamed in Ganeti)" %
1760
                  (renamed_inst.name,
1761
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1762
                  self.LogWarning)
1763
    finally:
1764
      ShutdownInstanceDisks(self, renamed_inst)
1765

    
1766
    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
1770
  """Remove an instance.
1771

1772
  """
1773
  HPATH = "instance-remove"
1774
  HTYPE = constants.HTYPE_INSTANCE
1775
  REQ_BGL = False
1776

    
1777
  def ExpandNames(self):
1778
    self._ExpandAndLockInstance()
1779
    self.needed_locks[locking.LEVEL_NODE] = []
1780
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1781
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1782

    
1783
  def DeclareLocks(self, level):
1784
    if level == locking.LEVEL_NODE:
1785
      self._LockInstancesNodes()
1786
    elif level == locking.LEVEL_NODE_RES:
1787
      # Copy node locks
1788
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1789
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1790

    
1791
  def BuildHooksEnv(self):
1792
    """Build hooks env.
1793

1794
    This runs on master, primary and secondary nodes of the instance.
1795

1796
    """
1797
    env = BuildInstanceHookEnvByObject(self, self.instance,
1798
                                       secondary_nodes=self.secondary_nodes,
1799
                                       disks=self.inst_disks)
1800
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1801
    return env
1802

    
1803
  def BuildHooksNodes(self):
1804
    """Build hooks nodes.
1805

1806
    """
1807
    nl = [self.cfg.GetMasterNode()]
1808
    nl_post = list(self.instance.all_nodes) + nl
1809
    return (nl, nl_post)
1810

    
1811
  def CheckPrereq(self):
1812
    """Check prerequisites.
1813

1814
    This checks that the instance is in the cluster.
1815

1816
    """
1817
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1818
    assert self.instance is not None, \
1819
      "Cannot retrieve locked instance %s" % self.op.instance_name
1820
    self.secondary_nodes = self.instance.secondary_nodes
1821
    self.inst_disks = self.instance.disks
1822

    
1823
  def Exec(self, feedback_fn):
1824
    """Remove the instance.
1825

1826
    """
1827
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1828
                 self.cfg.GetNodeName(self.instance.primary_node))
1829

    
1830
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1831
                                             self.instance,
1832
                                             self.op.shutdown_timeout,
1833
                                             self.op.reason)
1834
    if self.op.ignore_failures:
1835
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1836
    else:
1837
      result.Raise("Could not shutdown instance %s on node %s" %
1838
                   (self.instance.name,
1839
                    self.cfg.GetNodeName(self.instance.primary_node)))
1840

    
1841
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1842
            self.owned_locks(locking.LEVEL_NODE_RES))
1843
    assert not (set(self.instance.all_nodes) -
1844
                self.owned_locks(locking.LEVEL_NODE)), \
1845
      "Not owning correct locks"
1846

    
1847
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
1851
  """Move an instance by data-copying.
1852

1853
  """
1854
  HPATH = "instance-move"
1855
  HTYPE = constants.HTYPE_INSTANCE
1856
  REQ_BGL = False
1857

    
1858
  def ExpandNames(self):
1859
    self._ExpandAndLockInstance()
1860
    (self.op.target_node_uuid, self.op.target_node) = \
1861
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1862
                            self.op.target_node)
1863
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1864
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1865
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1866

    
1867
  def DeclareLocks(self, level):
1868
    if level == locking.LEVEL_NODE:
1869
      self._LockInstancesNodes(primary_only=True)
1870
    elif level == locking.LEVEL_NODE_RES:
1871
      # Copy node locks
1872
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1873
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1874

    
1875
  def BuildHooksEnv(self):
1876
    """Build hooks env.
1877

1878
    This runs on master, primary and target nodes of the instance.
1879

1880
    """
1881
    env = {
1882
      "TARGET_NODE": self.op.target_node,
1883
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1884
      }
1885
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1886
    return env
1887

    
1888
  def BuildHooksNodes(self):
1889
    """Build hooks nodes.
1890

1891
    """
1892
    nl = [
1893
      self.cfg.GetMasterNode(),
1894
      self.instance.primary_node,
1895
      self.op.target_node_uuid,
1896
      ]
1897
    return (nl, nl)
1898

    
1899
  def CheckPrereq(self):
1900
    """Check prerequisites.
1901

1902
    This checks that the instance is in the cluster.
1903

1904
    """
1905
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1906
    assert self.instance is not None, \
1907
      "Cannot retrieve locked instance %s" % self.op.instance_name
1908

    
1909
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1910
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1911
                                 self.instance.disk_template,
1912
                                 errors.ECODE_STATE)
1913

    
1914
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1915
    assert target_node is not None, \
1916
      "Cannot retrieve locked node %s" % self.op.target_node
1917

    
1918
    self.target_node_uuid = target_node.uuid
1919
    if target_node.uuid == self.instance.primary_node:
1920
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1921
                                 (self.instance.name, target_node.name),
1922
                                 errors.ECODE_STATE)
1923

    
1924
    cluster = self.cfg.GetClusterInfo()
1925
    bep = cluster.FillBE(self.instance)
1926

    
1927
    for idx, dsk in enumerate(self.instance.disks):
1928
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1929
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
1930
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1931
                                   " cannot copy" % idx, errors.ECODE_STATE)
1932

    
1933
    CheckNodeOnline(self, target_node.uuid)
1934
    CheckNodeNotDrained(self, target_node.uuid)
1935
    CheckNodeVmCapable(self, target_node.uuid)
1936
    group_info = self.cfg.GetNodeGroup(target_node.group)
1937
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1938
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1939
                           ignore=self.op.ignore_ipolicy)
1940

    
1941
    if self.instance.admin_state == constants.ADMINST_UP:
1942
      # check memory requirements on the target node
1943
      CheckNodeFreeMemory(
1944
          self, target_node.uuid, "failing over instance %s" %
1945
          self.instance.name, bep[constants.BE_MAXMEM],
1946
          self.instance.hypervisor,
1947
          cluster.hvparams[self.instance.hypervisor])
1948
    else:
1949
      self.LogInfo("Not checking memory on the secondary node as"
1950
                   " instance will not be started")
1951

    
1952
    # check bridge existence
1953
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1954

    
1955
  def Exec(self, feedback_fn):
1956
    """Move an instance.
1957

1958
    The move is done by shutting it down on its present node, copying
1959
    the data over (slow) and starting it on the new node.
1960

1961
    """
1962
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1963
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1964

    
1965
    self.LogInfo("Shutting down instance %s on source node %s",
1966
                 self.instance.name, source_node.name)
1967

    
1968
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1969
            self.owned_locks(locking.LEVEL_NODE_RES))
1970

    
1971
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1972
                                             self.op.shutdown_timeout,
1973
                                             self.op.reason)
1974
    if self.op.ignore_consistency:
1975
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1976
                  " anyway. Please make sure node %s is down. Error details" %
1977
                  (self.instance.name, source_node.name, source_node.name),
1978
                  self.LogWarning)
1979
    else:
1980
      result.Raise("Could not shutdown instance %s on node %s" %
1981
                   (self.instance.name, source_node.name))
1982

    
1983
    # create the target disks
1984
    try:
1985
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1986
    except errors.OpExecError:
1987
      self.LogWarning("Device creation failed")
1988
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1989
      raise
1990

    
1991
    errs = []
1992
    transfers = []
1993
    # activate, get path, create transfer jobs
1994
    for idx, disk in enumerate(self.instance.disks):
1995
      # FIXME: pass debug option from opcode to backend
1996
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1997
                                         constants.IEIO_RAW_DISK,
1998
                                         (disk, self.instance),
1999
                                         constants.IEIO_RAW_DISK,
2000
                                         (disk, self.instance),
2001
                                         None)
2002
      transfers.append(dt)
2003

    
2004
    import_result = \
2005
      masterd.instance.TransferInstanceData(self, feedback_fn,
2006
                                            source_node.uuid,
2007
                                            target_node.uuid,
2008
                                            target_node.secondary_ip,
2009
                                            self.op.compress,
2010
                                            self.instance, transfers)
2011
    if not compat.all(import_result):
2012
      errs.append("Failed to transfer instance data")
2013

    
2014
    if errs:
2015
      self.LogWarning("Some disks failed to copy, aborting")
2016
      try:
2017
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
2018
      finally:
2019
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
2020
        raise errors.OpExecError("Errors during disk copy: %s" %
2021
                                 (",".join(errs),))
2022

    
2023
    self.instance.primary_node = target_node.uuid
2024
    self.cfg.Update(self.instance, feedback_fn)
2025

    
2026
    self.LogInfo("Removing the disks on the original node")
2027
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
2028

    
2029
    # Only start the instance if it's marked as up
2030
    if self.instance.admin_state == constants.ADMINST_UP:
2031
      self.LogInfo("Starting instance %s on node %s",
2032
                   self.instance.name, target_node.name)
2033

    
2034
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
2035
                                          ignore_secondaries=True)
2036
      if not disks_ok:
2037
        ShutdownInstanceDisks(self, self.instance)
2038
        raise errors.OpExecError("Can't activate the instance's disks")
2039

    
2040
      result = self.rpc.call_instance_start(target_node.uuid,
2041
                                            (self.instance, None, None), False,
2042
                                            self.op.reason)
2043
      msg = result.fail_msg
2044
      if msg:
2045
        ShutdownInstanceDisks(self, self.instance)
2046
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
2047
                                 (self.instance.name, target_node.name, msg))


class LUInstanceMultiAlloc(NoHooksLU):
2051
  """Allocates multiple instances at the same time.
2052

2053
  """
2054
  REQ_BGL = False
2055

    
2056
  def CheckArguments(self):
2057
    """Check arguments.
2058

2059
    """
2060
    nodes = []
2061
    for inst in self.op.instances:
2062
      if inst.iallocator is not None:
2063
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
2064
                                   " instance objects", errors.ECODE_INVAL)
2065
      nodes.append(bool(inst.pnode))
2066
      if inst.disk_template in constants.DTS_INT_MIRROR:
2067
        nodes.append(bool(inst.snode))
2068

    
2069
    has_nodes = compat.any(nodes)
2070
    if compat.all(nodes) ^ has_nodes:
2071
      raise errors.OpPrereqError("There are instance objects providing"
2072
                                 " pnode/snode while others do not",
2073
                                 errors.ECODE_INVAL)
2074

    
2075
    if not has_nodes and self.op.iallocator is None:
2076
      default_iallocator = self.cfg.GetDefaultIAllocator()
2077
      if default_iallocator:
2078
        self.op.iallocator = default_iallocator
2079
      else:
2080
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
2081
                                   " given and no cluster-wide default"
2082
                                   " iallocator found; please specify either"
2083
                                   " an iallocator or nodes on the instances"
2084
                                   " or set a cluster-wide default iallocator",
2085
                                   errors.ECODE_INVAL)
2086

    
2087
    _CheckOpportunisticLocking(self.op)
2088

    
2089
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
2090
    if dups:
2091
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
2092
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
2093

    
2094
  def ExpandNames(self):
2095
    """Calculate the locks.
2096

2097
    """
2098
    self.share_locks = ShareAll()
2099
    self.needed_locks = {
2100
      # iallocator will select nodes and even if no iallocator is used,
2101
      # collisions with LUInstanceCreate should be avoided
2102
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
2103
      }
2104

    
2105
    if self.op.iallocator:
2106
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2107
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
2108

    
2109
      if self.op.opportunistic_locking:
2110
        self.opportunistic_locks[locking.LEVEL_NODE] = True
2111
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
2112
    else:
2113
      nodeslist = []
2114
      for inst in self.op.instances:
2115
        (inst.pnode_uuid, inst.pnode) = \
2116
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
2117
        nodeslist.append(inst.pnode_uuid)
2118
        if inst.snode is not None:
2119
          (inst.snode_uuid, inst.snode) = \
2120
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
2121
          nodeslist.append(inst.snode_uuid)
2122

    
2123
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
2124
      # Lock resources of instance's primary and secondary nodes (copy to
2125
      # prevent accidental modification)
2126
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2127

    
2128
  def CheckPrereq(self):
2129
    """Check prerequisite.
2130

2131
    """
2132
    if self.op.iallocator:
2133
      cluster = self.cfg.GetClusterInfo()
2134
      default_vg = self.cfg.GetVGName()
2135
      ec_id = self.proc.GetECId()
2136

    
2137
      if self.op.opportunistic_locking:
2138
        # Only consider nodes for which a lock is held
2139
        node_whitelist = self.cfg.GetNodeNames(
2140
          set(self.owned_locks(locking.LEVEL_NODE)) &
2141
          set(self.owned_locks(locking.LEVEL_NODE_RES)))
2142
      else:
2143
        node_whitelist = None
2144

    
2145
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
2146
                                           _ComputeNics(op, cluster, None,
2147
                                                        self.cfg, ec_id),
2148
                                           _ComputeFullBeParams(op, cluster),
2149
                                           node_whitelist)
2150
               for op in self.op.instances]
2151

    
2152
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2153
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2154

    
2155
      ial.Run(self.op.iallocator)
2156

    
2157
      if not ial.success:
2158
        raise errors.OpPrereqError("Can't compute nodes using"
2159
                                   " iallocator '%s': %s" %
2160
                                   (self.op.iallocator, ial.info),
2161
                                   errors.ECODE_NORES)
2162

    
2163
      self.ia_result = ial.result
2164

    
2165
    if self.op.dry_run:
2166
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2167
        constants.JOB_IDS_KEY: [],
2168
        })
2169

    
2170
  def _ConstructPartialResult(self):
2171
    """Contructs the partial result.
2172

2173
    """
2174
    if self.op.iallocator:
2175
      (allocatable, failed_insts) = self.ia_result
2176
      allocatable_insts = map(compat.fst, allocatable)
2177
    else:
2178
      allocatable_insts = [op.instance_name for op in self.op.instances]
2179
      failed_insts = []
2180

    
2181
    return {
2182
      constants.ALLOCATABLE_KEY: allocatable_insts,
2183
      constants.FAILED_KEY: failed_insts,
2184
      }
2185

    
2186
  def Exec(self, feedback_fn):
2187
    """Executes the opcode.
2188

2189
    """
2190
    jobs = []
2191
    if self.op.iallocator:
2192
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2193
      (allocatable, failed) = self.ia_result
2194

    
2195
      for (name, node_names) in allocatable:
2196
        op = op2inst.pop(name)
2197

    
2198
        (op.pnode_uuid, op.pnode) = \
2199
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2200
        if len(node_names) > 1:
2201
          (op.snode_uuid, op.snode) = \
2202
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2203

    
2204
        jobs.append([op])
2205

    
2206
      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator returned an incomplete result: %s" % \
        utils.CommaJoin(missing)
2210
    else:
2211
      jobs.extend([op] for op in self.op.instances)
2212

    
2213
    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
2217
  """Data structure for network interface modifications.
2218

2219
  Used by L{LUInstanceSetParams}.
2220

2221
  """
2222
  def __init__(self):
2223
    self.params = None
2224
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
2228
  """Prepares a list of container modifications by adding a private data field.
2229

2230
  @type mods: list of tuples; (operation, index, parameters)
2231
  @param mods: List of modifications
2232
  @type private_fn: callable or None
2233
  @param private_fn: Callable for constructing a private data field for a
2234
    modification
2235
  @rtype: list
2236

2237
  """
2238
  if private_fn is None:
2239
    fn = lambda: None
2240
  else:
2241
    fn = private_fn
2242

    
2243
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
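# Illustrative usage sketch (not part of the original module): preparing NIC
# modifications before handing them to _ApplyContainerMods.  The DDM_* and
# INIC_* constants and _InstNicModPrivate are the ones defined above and in
# constants.py; the concrete parameter values are invented for the example.
#
#   mods = [(constants.DDM_ADD, -1, {constants.INIC_MAC: constants.VALUE_AUTO})]
#   prepared = _PrepareContainerMods(mods, _InstNicModPrivate)
#   # prepared == [(constants.DDM_ADD, -1, {...}, <_InstNicModPrivate object>)]
#   # With private_fn=None, every tuple ends in None instead.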


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2247
  """Checks if nodes have enough physical CPUs
2248

2249
  This function checks if all given nodes have the needed number of
2250
  physical CPUs. In case any node has less CPUs or we cannot get the
2251
  information from the node, this function raises an OpPrereqError
2252
  exception.
2253

2254
  @type lu: C{LogicalUnit}
2255
  @param lu: a logical unit from which we get configuration data
2256
  @type node_uuids: C{list}
2257
  @param node_uuids: the list of node UUIDs to check
2258
  @type requested: C{int}
2259
  @param requested: the minimum acceptable number of physical CPUs
2260
  @type hypervisor_specs: list of pairs (string, dict of strings)
2261
  @param hypervisor_specs: list of hypervisor specifications in
2262
      pairs (hypervisor_name, hvparams)
2263
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2264
      or we cannot check the node
2265

2266
  """
2267
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2268
  for node_uuid in node_uuids:
2269
    info = nodeinfo[node_uuid]
2270
    node_name = lu.cfg.GetNodeName(node_uuid)
2271
    info.Raise("Cannot get current information from node %s" % node_name,
2272
               prereq=True, ecode=errors.ECODE_ENVIRON)
2273
    (_, _, (hv_info, )) = info.payload
2274
    num_cpus = hv_info.get("cpu_total", None)
2275
    if not isinstance(num_cpus, int):
2276
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2277
                                 " on node %s, result was '%s'" %
2278
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2279
    if requested > num_cpus:
2280
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2281
                                 "required" % (node_name, num_cpus, requested),
2282
                                 errors.ECODE_NORES)
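# Illustrative usage sketch (not part of the original module): a logical unit
# that needs at least four physical CPUs on a set of nodes could call the
# check roughly as follows; the hvparams lookup mirrors what other checks in
# this file do, and the figure 4 is arbitrary.
#
#   hv_specs = [(self.op.hypervisor,
#                cluster.hvparams.get(self.op.hypervisor, {}))]
#   _CheckNodesPhysicalCPUs(self, node_uuids, 4, hv_specs)
#   # raises errors.OpPrereqError if any node reports fewer than 4 CPUs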


def GetItemFromContainer(identifier, kind, container):
2286
  """Return the item refered by the identifier.
2287

2288
  @type identifier: string
2289
  @param identifier: Item index or name or UUID
2290
  @type kind: string
2291
  @param kind: One-word item description
2292
  @type container: list
2293
  @param container: Container to get the item from
2294

2295
  """
2296
  # Index
2297
  try:
2298
    idx = int(identifier)
2299
    if idx == -1:
2300
      # Append
2301
      absidx = len(container) - 1
2302
    elif idx < 0:
2303
      raise IndexError("Not accepting negative indices other than -1")
2304
    elif idx > len(container):
2305
      raise IndexError("Got %s index %s, but there are only %s" %
2306
                       (kind, idx, len(container)))
2307
    else:
2308
      absidx = idx
2309
    return (absidx, container[idx])
2310
  except ValueError:
2311
    pass
2312

    
2313
  for idx, item in enumerate(container):
2314
    if item.uuid == identifier or item.name == identifier:
2315
      return (idx, item)
2316

    
2317
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2318
                             (kind, identifier), errors.ECODE_NOENT)
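# Illustrative usage sketch (not part of the original module): the identifier
# may be a numeric index (-1 meaning the last item), an item name or an item
# UUID.  The NIC names below are hypothetical.
#
#   nics = instance.nics                       # e.g. two NICs: eth0, eth1
#   GetItemFromContainer("0", "nic", nics)     # -> (0, <NIC eth0>)
#   GetItemFromContainer("-1", "nic", nics)    # -> (1, <NIC eth1>)
#   GetItemFromContainer("eth1", "nic", nics)  # -> (1, <NIC eth1>)
#   GetItemFromContainer("eth9", "nic", nics)  # raises errors.OpPrereqError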


def _ApplyContainerMods(kind, container, chgdesc, mods,
2322
                        create_fn, modify_fn, remove_fn,
2323
                        post_add_fn=None):
2324
  """Applies descriptions in C{mods} to C{container}.
2325

2326
  @type kind: string
2327
  @param kind: One-word item description
2328
  @type container: list
2329
  @param container: Container to modify
2330
  @type chgdesc: None or list
2331
  @param chgdesc: List of applied changes
2332
  @type mods: list
2333
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2334
  @type create_fn: callable
2335
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2336
    receives absolute item index, parameters and private data object as added
2337
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2338
    as list
2339
  @type modify_fn: callable
2340
  @param modify_fn: Callback for modifying an existing item
2341
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2342
    and private data object as added by L{_PrepareContainerMods}, returns
2343
    changes as list
2344
  @type remove_fn: callable
2345
  @param remove_fn: Callback on removing item; receives absolute item index,
2346
    item and private data object as added by L{_PrepareContainerMods}
2347
  @type post_add_fn: callable
2348
  @param post_add_fn: Callable for post-processing a newly created item after
2349
    it has been put into the container. It receives the index of the new item
2350
    and the new item as parameters.
2351

2352
  """
2353
  for (op, identifier, params, private) in mods:
2354
    changes = None
2355

    
2356
    if op == constants.DDM_ADD:
2357
      # Calculate where item will be added
2358
      # When adding an item, identifier can only be an index
2359
      try:
2360
        idx = int(identifier)
2361
      except ValueError:
2362
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2363
                                   " identifier for %s" % constants.DDM_ADD,
2364
                                   errors.ECODE_INVAL)
2365
      if idx == -1:
2366
        addidx = len(container)
2367
      else:
2368
        if idx < 0:
2369
          raise IndexError("Not accepting negative indices other than -1")
2370
        elif idx > len(container):
2371
          raise IndexError("Got %s index %s, but there are only %s" %
2372
                           (kind, idx, len(container)))
2373
        addidx = idx
2374

    
2375
      if create_fn is None:
2376
        item = params
2377
      else:
2378
        (item, changes) = create_fn(addidx, params, private)
2379

    
2380
      if idx == -1:
2381
        container.append(item)
2382
      else:
2383
        assert idx >= 0
2384
        assert idx <= len(container)
2385
        # list.insert does so before the specified index
2386
        container.insert(idx, item)
2387

    
2388
      if post_add_fn is not None:
2389
        post_add_fn(addidx, item)
2390

    
2391
    else:
2392
      # Retrieve existing item
2393
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2394

    
2395
      if op == constants.DDM_REMOVE:
2396
        assert not params
2397

    
2398
        changes = [("%s/%s" % (kind, absidx), "remove")]
2399

    
2400
        if remove_fn is not None:
2401
          msg = remove_fn(absidx, item, private)
2402
          if msg:
2403
            changes.append(("%s/%s" % (kind, absidx), msg))
2404

    
2405
        assert container[absidx] == item
2406
        del container[absidx]
2407
      elif op == constants.DDM_MODIFY:
2408
        if modify_fn is not None:
2409
          changes = modify_fn(absidx, item, params, private)
2410
      else:
2411
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2412

    
2413
    assert _TApplyContModsCbChanges(changes)
2414

    
2415
    if not (chgdesc is None or changes is None):
2416
      chgdesc.extend(changes)
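# Illustrative usage sketch (not part of the original module): removing the
# second NIC of an instance while collecting change descriptions.  No create,
# modify or remove callbacks are needed for a plain removal, so None is passed
# for all three.
#
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, "1", {})], None)
#   _ApplyContainerMods("nic", instance.nics, chgdesc, mods, None, None, None)
#   # chgdesc == [("nic/1", "remove")] and instance.nics lost its second entry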


def _UpdateIvNames(base_index, disks):
2420
  """Updates the C{iv_name} attribute of disks.
2421

2422
  @type disks: list of L{objects.Disk}
2423

2424
  """
2425
  for (idx, disk) in enumerate(disks):
2426
    disk.iv_name = "disk/%s" % (base_index + idx, )


class LUInstanceSetParams(LogicalUnit):
2430
  """Modifies an instances's parameters.
2431

2432
  """
2433
  HPATH = "instance-modify"
2434
  HTYPE = constants.HTYPE_INSTANCE
2435
  REQ_BGL = False
2436

    
2437
  @staticmethod
2438
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2439
    assert ht.TList(mods)
2440
    assert not mods or len(mods[0]) in (2, 3)
2441

    
2442
    if mods and len(mods[0]) == 2:
2443
      result = []
2444

    
2445
      addremove = 0
2446
      for op, params in mods:
2447
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2448
          result.append((op, -1, params))
2449
          addremove += 1
2450

    
2451
          if addremove > 1:
2452
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2453
                                       " supported at a time" % kind,
2454
                                       errors.ECODE_INVAL)
2455
        else:
2456
          result.append((constants.DDM_MODIFY, op, params))
2457

    
2458
      assert verify_fn(result)
2459
    else:
2460
      result = mods
2461

    
2462
    return result
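  # Illustrative usage sketch (not part of the original module): the legacy
  # two-element form accepted from older clients is upgraded to the
  # (op, identifier, params) form used internally, while modifications keep
  # their identifier.  The NIC parameter values are invented for the example.
  #
  #   self._UpgradeDiskNicMods("NIC", [(constants.DDM_ADD, {"mode": "bridged"})],
  #                            ht.TSetParamsMods(ht.TINicParams))
  #   # -> [(constants.DDM_ADD, -1, {"mode": "bridged"})]
  #   self._UpgradeDiskNicMods("NIC", [("0", {"mode": "routed"})], ...)
  #   # -> [(constants.DDM_MODIFY, "0", {"mode": "routed"})]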
2463

    
2464
  @staticmethod
2465
  def _CheckMods(kind, mods, key_types, item_fn):
2466
    """Ensures requested disk/NIC modifications are valid.
2467

2468
    """
2469
    for (op, _, params) in mods:
2470
      assert ht.TDict(params)
2471

    
2472
      # If 'key_types' is an empty dict, we assume we have an
2473
      # 'ext' template and thus do not ForceDictType
2474
      if key_types:
2475
        utils.ForceDictType(params, key_types)
2476

    
2477
      if op == constants.DDM_REMOVE:
2478
        if params:
2479
          raise errors.OpPrereqError("No settings should be passed when"
2480
                                     " removing a %s" % kind,
2481
                                     errors.ECODE_INVAL)
2482
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2483
        item_fn(op, params)
2484
      else:
2485
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2486

    
2487
  def _VerifyDiskModification(self, op, params, excl_stor):
2488
    """Verifies a disk modification.
2489

2490
    """
2491
    if op == constants.DDM_ADD:
2492
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2493
      if mode not in constants.DISK_ACCESS_SET:
2494
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2495
                                   errors.ECODE_INVAL)
2496

    
2497
      size = params.get(constants.IDISK_SIZE, None)
2498
      if size is None:
2499
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2500
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2501
      size = int(size)
2502

    
2503
      params[constants.IDISK_SIZE] = size
2504
      name = params.get(constants.IDISK_NAME, None)
2505
      if name is not None and name.lower() == constants.VALUE_NONE:
2506
        params[constants.IDISK_NAME] = None
2507

    
2508
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2509

    
2510
    elif op == constants.DDM_MODIFY:
2511
      if constants.IDISK_SIZE in params:
2512
        raise errors.OpPrereqError("Disk size change not possible, use"
2513
                                   " grow-disk", errors.ECODE_INVAL)
2514

    
2515
      # Disk modification supports changing only the disk name and mode.
2516
      # Changing arbitrary parameters is allowed only for the ext disk template
2517
      if self.instance.disk_template != constants.DT_EXT:
2518
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2519

    
2520
      name = params.get(constants.IDISK_NAME, None)
2521
      if name is not None and name.lower() == constants.VALUE_NONE:
2522
        params[constants.IDISK_NAME] = None
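  # Illustrative usage sketch (not part of the original module): a minimal
  # valid disk addition as it reaches this check.  The size is mandatory, the
  # access mode defaults to read-write and a name of "none" is normalised to
  # None; the values themselves are invented.
  #
  #   params = {constants.IDISK_SIZE: 1024, constants.IDISK_NAME: "none"}
  #   self._VerifyDiskModification(constants.DDM_ADD, params, False)
  #   # params[constants.IDISK_MODE] == constants.DISK_RDWR
  #   # params[constants.IDISK_NAME] is None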
2523

    
2524
  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given, mode or link"
                                     " should not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

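  # For illustration only: after _VerifyNicModification has run, special string
  # values in the parameter dictionary are normalized; for example a value of
  # "none" for the NIC name becomes None, and an add operation without a MAC
  # address gets constants.VALUE_AUTO filled in.
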
  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
            self.op.instance_communication is not None):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock node group to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):
    """Prepares a NIC modification, filling and verifying its parameters.

    """
    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if not self.op.wait_for_sync and self.instance.disks_active:
      for mod in self.diskmod:
        if mod[0] == constants.DDM_ADD:
          raise errors.OpPrereqError("Can't add a disk to an instance with"
                                     " activated disks and"
                                     " --no-wait-for-sync given.",
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  @staticmethod
  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
    """Create a NIC mod that adds or removes the instance
    communication NIC of a running instance.

    The NICs are dynamically created using the Dynamic Device
    Modification (DDM).  This function produces a NIC modification
    (mod) that inserts an additional NIC meant for instance
    communication in or removes an existing instance communication NIC
    from a running instance, using DDM.

    @type cfg: L{config.ConfigWriter}
    @param cfg: cluster configuration

    @type instance_communication: boolean
    @param instance_communication: whether instance communication is
                                   enabled or disabled

    @type instance: L{objects.Instance}
    @param instance: instance to which the NIC mod will be applied

    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
            (L{constants.DDM_REMOVE}, -1, parameters) or
            L{None}
    @return: DDM mod containing an action to add or remove the NIC, or
             None if nothing needs to be done

    """
    nic_name = _ComputeInstanceCommunicationNIC(instance.name)

    instance_communication_nic = None

    for nic in instance.nics:
      if nic.name == nic_name:
        instance_communication_nic = nic
        break

    if instance_communication and not instance_communication_nic:
      action = constants.DDM_ADD
      params = {constants.INIC_NAME: nic_name,
                constants.INIC_MAC: constants.VALUE_GENERATE,
                constants.INIC_IP: constants.NIC_IP_POOL,
                constants.INIC_NETWORK:
                  cfg.GetInstanceCommunicationNetwork()}
    elif not instance_communication and instance_communication_nic:
      action = constants.DDM_REMOVE
      params = None
    else:
      action = None
      params = None

    if action is not None:
      return (action, -1, params)
    else:
      return None

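  # For illustration only: when the instance communication NIC has to be
  # added, the mod returned above looks like the following (index -1 appends
  # the NIC at the end of the list):
  #   (constants.DDM_ADD, -1,
  #    {constants.INIC_NAME: nic_name,
  #     constants.INIC_MAC: constants.VALUE_GENERATE,
  #     constants.INIC_IP: constants.NIC_IP_POOL,
  #     constants.INIC_NETWORK: cfg.GetInstanceCommunicationNetwork()})
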
  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug or self.op.hotplug_if_possible:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      if result.fail_msg:
        if self.op.hotplug:
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
                       prereq=True)
        else:
          self.LogWarning(result.fail_msg)
          self.op.hotplug = False
          self.LogInfo("Modification will take place without hotplugging.")
      else:
        self.op.hotplug = True

    # Prepare NIC modifications
    # add or remove NIC for instance communication
    if self.op.instance_communication is not None:
      mod = self._InstanceCommunicationDDM(self.cfg,
                                           self.op.instance_communication,
                                           self.instance)
      if mod is not None:
        self.op.nics.append(mod)

    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # disks processing
    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.os_name and not self.op.force:
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    if self.op.osparams or self.op.osparams_private:
      public_parms = self.op.osparams or {}
      private_parms = self.op.osparams_private or {}
      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)

      if dupe_keys:
        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
                                   utils.CommaJoin(dupe_keys))

      self.os_inst = GetUpdatedParams(self.instance.osparams,
                                      public_parms)
      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
                                              private_parms)

      CheckOSParams(self, True, node_uuids, instance_os,
                    objects.FillDict(self.os_inst,
                                     self.os_inst_private),
                    self.op.force_variant)

    else:
      self.os_inst = {}
      self.os_inst_private = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have between %d and"
                                   " %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = [nic.Copy() for nic in self.instance.nics]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

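  # Note: the two conversion helpers above are dispatched through the
  # class-level _DISK_CONVERSIONS mapping (defined elsewhere in this class),
  # which is keyed by (old_template, new_template) pairs, presumably e.g.
  # (constants.DT_PLAIN, constants.DT_DRBD8).
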
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
    """Performs a hotplug action on the instance's primary node.

    """
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution...")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

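  # For illustration only: _HotplugDevice returns a short status string that
  # the callers below record in the change list, i.e. either "hotplug:done"
  # or "hotplug:failed".
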
  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _PostAddDisk(self, _, disk):
    if not WaitForSync(self, self.instance, disks=[disk],
                       oneshot=not self.op.wait_for_sync):
      raise errors.OpExecError("Failed to sync disks of %s" %
                               self.instance.name)

    # the disk is active at this point, so deactivate it if the instance disks
    # are supposed to be inactive
    if not self.instance.disks_active:
      ShutdownInstanceDisks(self, self.instance, disks=[disk])

  def _ModifyDisk(self, idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    if constants.IDISK_MODE in params:
      disk.mode = params.get(constants.IDISK_MODE)
      changes.append(("disk.mode/%d" % idx, disk.mode))

    if constants.IDISK_NAME in params:
      disk.name = params.get(constants.IDISK_NAME)
      changes.append(("disk.name/%d" % idx, disk.name))

    # Modify arbitrary params in case instance template is ext
    for key, value in params.iteritems():
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
          self.instance.disk_template == constants.DT_EXT):
        # stolen from GetUpdatedParams: default means reset/delete
        if value.lower() == constants.VALUE_DEFAULT:
          try:
            del disk.params[key]
          except KeyError:
            pass
        else:
          disk.params[key] = value
        changes.append(("disk.params:%s/%d" % (key, idx), value))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

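  # For illustration only: an entry appended to the change list by
  # _CreateNewNic looks roughly like the following (all values made up):
  #   ("nic.0",
  #    "add:mac=aa:00:00:35:d0:7b,ip=None,mode=bridged,link=xen-br0,"
  #    "network=None")
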
  def _ApplyNicMods(self, idx, nic, params, private):
3662
    """Modifies a network interface.
3663

3664
    """
3665
    changes = []
3666

    
3667
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3668
      if key in params:
3669
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3670
        setattr(nic, key, params[key])
3671

    
3672
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3673
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3674
    if new_net_uuid != nic.network:
3675
      changes.append(("nic.network/%d" % idx, new_net))
3676
      nic.network = new_net_uuid
3677

    
3678
    if private.filled:
3679
      nic.nicparams = private.filled
3680

    
3681
      for (key, val) in nic.nicparams.items():
3682
        changes.append(("nic.%s/%d" % (key, idx), val))
3683

    
3684
    if self.op.hotplug:
3685
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3686
                                constants.HOTPLUG_TARGET_NIC,
3687
                                nic, None, idx)
3688
      changes.append(("nic/%d" % idx, msg))
3689

    
3690
    return changes
3691

  def _RemoveNic(self, idx, nic, _):
    """Removes a network interface.

    """
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

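    # Node resource locks are held exactly when a disk template conversion
    # was requested; the assertion below documents this invariant.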
    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

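    # Disk template conversion (e.g. plain <-> drbd8): the instance's disks
    # must be shut down first, then the conversion routine registered in
    # _DISK_CONVERSIONS for this (old, new) template pair is invoked.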
    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.osparams_private:
      self.instance.osparams_private = self.os_inst_private
      for key, val in self.op.osparams_private.iteritems():
        # Show the Private(...) blurb.
        result.append(("os_private/%s" % key, repr(val)))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

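    # Finally, apply the requested change of the admin state (offline/online),
    # if any; "online" here means administratively up but stopped.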
    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    UpdateMetadata(feedback_fn, self.rpc, self.instance)

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

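    # Node group and node locks start out empty; they are filled in lazily
    # by DeclareLocks once the instance lock has been acquired.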
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

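    # Ask the configured iallocator for the set of jobs that would move the
    # instance to one of the requested target groups; the jobs are returned
    # to the caller for submission rather than executed here.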
    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)