Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / instance.py @ 0973f9ed

History | View | Annotate | Download (138.6 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with instances."""
23

    
24
import OpenSSL
25
import copy
26
import logging
27
import os
28

    
29
from ganeti import compat
30
from ganeti import constants
31
from ganeti import errors
32
from ganeti import ht
33
from ganeti import hypervisor
34
from ganeti import locking
35
from ganeti.masterd import iallocator
36
from ganeti import masterd
37
from ganeti import netutils
38
from ganeti import objects
39
from ganeti import opcodes
40
from ganeti import pathutils
41
from ganeti import rpc
42
from ganeti import utils
43

    
44
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45

    
46
from ganeti.cmdlib.common import INSTANCE_DOWN, \
47
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
53
from ganeti.cmdlib.instance_storage import CreateDisks, \
54
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55
  IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
56
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
58
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
59
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
60
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
61
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
62
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
63
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
64

    
65
import ganeti.masterd.instance
66

    
67

    
68
#: Type description for changes as returned by L{_ApplyContainerMods}'s
69
#: callbacks
70
_TApplyContModsCbChanges = \
71
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
72
    ht.TNonEmptyString,
73
    ht.TAny,
74
    ])))
75

    
76

    
77
def _CheckHostnameSane(lu, name):
78
  """Ensures that a given hostname resolves to a 'sane' name.
79

80
  The given name is required to be a prefix of the resolved hostname,
81
  to prevent accidental mismatches.
82

83
  @param lu: the logical unit on behalf of which we're checking
84
  @param name: the name we should resolve and check
85
  @return: the resolved hostname object
86

87
  """
88
  hostname = netutils.GetHostname(name=name)
89
  if hostname.name != name:
90
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
91
  if not utils.MatchNameComponent(name, [hostname.name]):
92
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
93
                                " same as given hostname '%s'") %
94
                               (hostname.name, name), errors.ECODE_INVAL)
95
  return hostname
96

    
97

    
98
def _CheckOpportunisticLocking(op):
99
  """Generate error if opportunistic locking is not possible.
100

101
  """
102
  if op.opportunistic_locking and not op.iallocator:
103
    raise errors.OpPrereqError("Opportunistic locking is only available in"
104
                               " combination with an instance allocator",
105
                               errors.ECODE_INVAL)
106

    
107

    
108
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
109
  """Wrapper around IAReqInstanceAlloc.
110

111
  @param op: The instance opcode
112
  @param disks: The computed disks
113
  @param nics: The computed nics
114
  @param beparams: The full filled beparams
115
  @param node_whitelist: List of nodes which should appear as online to the
116
    allocator (unless the node is already marked offline)
117

118
  @returns: A filled L{iallocator.IAReqInstanceAlloc}
119

120
  """
121
  spindle_use = beparams[constants.BE_SPINDLE_USE]
122
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
123
                                       disk_template=op.disk_template,
124
                                       tags=op.tags,
125
                                       os=op.os_type,
126
                                       vcpus=beparams[constants.BE_VCPUS],
127
                                       memory=beparams[constants.BE_MAXMEM],
128
                                       spindle_use=spindle_use,
129
                                       disks=disks,
130
                                       nics=[n.ToDict() for n in nics],
131
                                       hypervisor=op.hypervisor,
132
                                       node_whitelist=node_whitelist)
133

    
134

    
135
def _ComputeFullBeParams(op, cluster):
136
  """Computes the full beparams.
137

138
  @param op: The instance opcode
139
  @param cluster: The cluster config object
140

141
  @return: The fully filled beparams
142

143
  """
144
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
145
  for param, value in op.beparams.iteritems():
146
    if value == constants.VALUE_AUTO:
147
      op.beparams[param] = default_beparams[param]
148
  objects.UpgradeBeParams(op.beparams)
149
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
150
  return cluster.SimpleFillBE(op.beparams)
151

    
152

    
153
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
154
  """Computes the nics.
155

156
  @param op: The instance opcode
157
  @param cluster: Cluster configuration object
158
  @param default_ip: The default ip to assign
159
  @param cfg: An instance of the configuration object
160
  @param ec_id: Execution context ID
161

162
  @returns: The build up nics
163

164
  """
165
  nics = []
166
  for nic in op.nics:
167
    nic_mode_req = nic.get(constants.INIC_MODE, None)
168
    nic_mode = nic_mode_req
169
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
170
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
171

    
172
    net = nic.get(constants.INIC_NETWORK, None)
173
    link = nic.get(constants.NIC_LINK, None)
174
    ip = nic.get(constants.INIC_IP, None)
175

    
176
    if net is None or net.lower() == constants.VALUE_NONE:
177
      net = None
178
    else:
179
      if nic_mode_req is not None or link is not None:
180
        raise errors.OpPrereqError("If network is given, no mode or link"
181
                                   " is allowed to be passed",
182
                                   errors.ECODE_INVAL)
183

    
184
    # ip validity checks
185
    if ip is None or ip.lower() == constants.VALUE_NONE:
186
      nic_ip = None
187
    elif ip.lower() == constants.VALUE_AUTO:
188
      if not op.name_check:
189
        raise errors.OpPrereqError("IP address set to auto but name checks"
190
                                   " have been skipped",
191
                                   errors.ECODE_INVAL)
192
      nic_ip = default_ip
193
    else:
194
      # We defer pool operations until later, so that the iallocator has
195
      # filled in the instance's node(s) dimara
196
      if ip.lower() == constants.NIC_IP_POOL:
197
        if net is None:
198
          raise errors.OpPrereqError("if ip=pool, parameter network"
199
                                     " must be passed too",
200
                                     errors.ECODE_INVAL)
201

    
202
      elif not netutils.IPAddress.IsValid(ip):
203
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
204
                                   errors.ECODE_INVAL)
205

    
206
      nic_ip = ip
207

    
208
    # TODO: check the ip address for uniqueness
209
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
210
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
211
                                 errors.ECODE_INVAL)
212

    
213
    # MAC address verification
214
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
215
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
216
      mac = utils.NormalizeAndValidateMac(mac)
217

    
218
      try:
219
        # TODO: We need to factor this out
220
        cfg.ReserveMAC(mac, ec_id)
221
      except errors.ReservationError:
222
        raise errors.OpPrereqError("MAC address %s already in use"
223
                                   " in cluster" % mac,
224
                                   errors.ECODE_NOTUNIQUE)
225

    
226
    #  Build nic parameters
227
    nicparams = {}
228
    if nic_mode_req:
229
      nicparams[constants.NIC_MODE] = nic_mode
230
    if link:
231
      nicparams[constants.NIC_LINK] = link
232

    
233
    check_params = cluster.SimpleFillNIC(nicparams)
234
    objects.NIC.CheckParameterSyntax(check_params)
235
    net_uuid = cfg.LookupNetwork(net)
236
    name = nic.get(constants.INIC_NAME, None)
237
    if name is not None and name.lower() == constants.VALUE_NONE:
238
      name = None
239
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
240
                          network=net_uuid, nicparams=nicparams)
241
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
242
    nics.append(nic_obj)
243

    
244
  return nics
245

    
246

    
247
def _CheckForConflictingIp(lu, ip, node):
248
  """In case of conflicting IP address raise error.
249

250
  @type ip: string
251
  @param ip: IP address
252
  @type node: string
253
  @param node: node name
254

255
  """
256
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
257
  if conf_net is not None:
258
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
259
                                " network %s, but the target NIC does not." %
260
                                (ip, conf_net)),
261
                               errors.ECODE_STATE)
262

    
263
  return (None, None)
264

    
265

    
266
def _ComputeIPolicyInstanceSpecViolation(
267
  ipolicy, instance_spec, disk_template,
268
  _compute_fn=ComputeIPolicySpecViolation):
269
  """Compute if instance specs meets the specs of ipolicy.
270

271
  @type ipolicy: dict
272
  @param ipolicy: The ipolicy to verify against
273
  @param instance_spec: dict
274
  @param instance_spec: The instance spec to verify
275
  @type disk_template: string
276
  @param disk_template: the disk template of the instance
277
  @param _compute_fn: The function to verify ipolicy (unittest only)
278
  @see: L{ComputeIPolicySpecViolation}
279

280
  """
281
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
282
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
283
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
284
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
285
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
286
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
287

    
288
  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
289
                     disk_sizes, spindle_use, disk_template)
290

    
291

    
292
def _CheckOSVariant(os_obj, name):
293
  """Check whether an OS name conforms to the os variants specification.
294

295
  @type os_obj: L{objects.OS}
296
  @param os_obj: OS object to check
297
  @type name: string
298
  @param name: OS name passed by the user, to check for validity
299

300
  """
301
  variant = objects.OS.GetVariant(name)
302
  if not os_obj.supported_variants:
303
    if variant:
304
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
305
                                 " passed)" % (os_obj.name, variant),
306
                                 errors.ECODE_INVAL)
307
    return
308
  if not variant:
309
    raise errors.OpPrereqError("OS name must include a variant",
310
                               errors.ECODE_INVAL)
311

    
312
  if variant not in os_obj.supported_variants:
313
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
314

    
315

    
316
class LUInstanceCreate(LogicalUnit):
317
  """Create an instance.
318

319
  """
320
  HPATH = "instance-add"
321
  HTYPE = constants.HTYPE_INSTANCE
322
  REQ_BGL = False
323

    
324
  def CheckArguments(self):
325
    """Check arguments.
326

327
    """
328
    # do not require name_check to ease forward/backward compatibility
329
    # for tools
330
    if self.op.no_install and self.op.start:
331
      self.LogInfo("No-installation mode selected, disabling startup")
332
      self.op.start = False
333
    # validate/normalize the instance name
334
    self.op.instance_name = \
335
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
336

    
337
    if self.op.ip_check and not self.op.name_check:
338
      # TODO: make the ip check more flexible and not depend on the name check
339
      raise errors.OpPrereqError("Cannot do IP address check without a name"
340
                                 " check", errors.ECODE_INVAL)
341

    
342
    # check nics' parameter names
343
    for nic in self.op.nics:
344
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
345
    # check that NIC's parameters names are unique and valid
346
    utils.ValidateDeviceNames("NIC", self.op.nics)
347

    
348
    # check that disk's names are unique and valid
349
    utils.ValidateDeviceNames("disk", self.op.disks)
350

    
351
    cluster = self.cfg.GetClusterInfo()
352
    if not self.op.disk_template in cluster.enabled_disk_templates:
353
      raise errors.OpPrereqError("Cannot create an instance with disk template"
354
                                 " '%s', because it is not enabled in the"
355
                                 " cluster. Enabled disk templates are: %s." %
356
                                 (self.op.disk_template,
357
                                  ",".join(cluster.enabled_disk_templates)))
358

    
359
    # check disks. parameter names and consistent adopt/no-adopt strategy
360
    has_adopt = has_no_adopt = False
361
    for disk in self.op.disks:
362
      if self.op.disk_template != constants.DT_EXT:
363
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
364
      if constants.IDISK_ADOPT in disk:
365
        has_adopt = True
366
      else:
367
        has_no_adopt = True
368
    if has_adopt and has_no_adopt:
369
      raise errors.OpPrereqError("Either all disks are adopted or none is",
370
                                 errors.ECODE_INVAL)
371
    if has_adopt:
372
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
373
        raise errors.OpPrereqError("Disk adoption is not supported for the"
374
                                   " '%s' disk template" %
375
                                   self.op.disk_template,
376
                                   errors.ECODE_INVAL)
377
      if self.op.iallocator is not None:
378
        raise errors.OpPrereqError("Disk adoption not allowed with an"
379
                                   " iallocator script", errors.ECODE_INVAL)
380
      if self.op.mode == constants.INSTANCE_IMPORT:
381
        raise errors.OpPrereqError("Disk adoption not allowed for"
382
                                   " instance import", errors.ECODE_INVAL)
383
    else:
384
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
385
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
386
                                   " but no 'adopt' parameter given" %
387
                                   self.op.disk_template,
388
                                   errors.ECODE_INVAL)
389

    
390
    self.adopt_disks = has_adopt
391

    
392
    # instance name verification
393
    if self.op.name_check:
394
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
395
      self.op.instance_name = self.hostname1.name
396
      # used in CheckPrereq for ip ping check
397
      self.check_ip = self.hostname1.ip
398
    else:
399
      self.check_ip = None
400

    
401
    # file storage checks
402
    if (self.op.file_driver and
403
        not self.op.file_driver in constants.FILE_DRIVER):
404
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
405
                                 self.op.file_driver, errors.ECODE_INVAL)
406

    
407
    # set default file_driver if unset and required
408
    if (not self.op.file_driver and
409
        self.op.disk_template in [constants.DT_FILE,
410
                                  constants.DT_SHARED_FILE]):
411
      self.op.file_driver = constants.FD_DEFAULT
412

    
413
    if self.op.disk_template == constants.DT_FILE:
414
      opcodes.RequireFileStorage()
415
    elif self.op.disk_template == constants.DT_SHARED_FILE:
416
      opcodes.RequireSharedFileStorage()
417

    
418
    ### Node/iallocator related checks
419
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
420

    
421
    if self.op.pnode is not None:
422
      if self.op.disk_template in constants.DTS_INT_MIRROR:
423
        if self.op.snode is None:
424
          raise errors.OpPrereqError("The networked disk templates need"
425
                                     " a mirror node", errors.ECODE_INVAL)
426
      elif self.op.snode:
427
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
428
                        " template")
429
        self.op.snode = None
430

    
431
    _CheckOpportunisticLocking(self.op)
432

    
433
    self._cds = GetClusterDomainSecret()
434

    
435
    if self.op.mode == constants.INSTANCE_IMPORT:
436
      # On import force_variant must be True, because if we forced it at
437
      # initial install, our only chance when importing it back is that it
438
      # works again!
439
      self.op.force_variant = True
440

    
441
      if self.op.no_install:
442
        self.LogInfo("No-installation mode has no effect during import")
443

    
444
    elif self.op.mode == constants.INSTANCE_CREATE:
445
      if self.op.os_type is None:
446
        raise errors.OpPrereqError("No guest OS specified",
447
                                   errors.ECODE_INVAL)
448
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
449
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
450
                                   " installation" % self.op.os_type,
451
                                   errors.ECODE_STATE)
452
      if self.op.disk_template is None:
453
        raise errors.OpPrereqError("No disk template specified",
454
                                   errors.ECODE_INVAL)
455

    
456
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
457
      # Check handshake to ensure both clusters have the same domain secret
458
      src_handshake = self.op.source_handshake
459
      if not src_handshake:
460
        raise errors.OpPrereqError("Missing source handshake",
461
                                   errors.ECODE_INVAL)
462

    
463
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
464
                                                           src_handshake)
465
      if errmsg:
466
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
467
                                   errors.ECODE_INVAL)
468

    
469
      # Load and check source CA
470
      self.source_x509_ca_pem = self.op.source_x509_ca
471
      if not self.source_x509_ca_pem:
472
        raise errors.OpPrereqError("Missing source X509 CA",
473
                                   errors.ECODE_INVAL)
474

    
475
      try:
476
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
477
                                                    self._cds)
478
      except OpenSSL.crypto.Error, err:
479
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
480
                                   (err, ), errors.ECODE_INVAL)
481

    
482
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
483
      if errcode is not None:
484
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
485
                                   errors.ECODE_INVAL)
486

    
487
      self.source_x509_ca = cert
488

    
489
      src_instance_name = self.op.source_instance_name
490
      if not src_instance_name:
491
        raise errors.OpPrereqError("Missing source instance name",
492
                                   errors.ECODE_INVAL)
493

    
494
      self.source_instance_name = \
495
        netutils.GetHostname(name=src_instance_name).name
496

    
497
    else:
498
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
499
                                 self.op.mode, errors.ECODE_INVAL)
500

    
501
  def ExpandNames(self):
502
    """ExpandNames for CreateInstance.
503

504
    Figure out the right locks for instance creation.
505

506
    """
507
    self.needed_locks = {}
508

    
509
    instance_name = self.op.instance_name
510
    # this is just a preventive check, but someone might still add this
511
    # instance in the meantime, and creation will fail at lock-add time
512
    if instance_name in self.cfg.GetInstanceList():
513
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
514
                                 instance_name, errors.ECODE_EXISTS)
515

    
516
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
517

    
518
    if self.op.iallocator:
519
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
520
      # specifying a group on instance creation and then selecting nodes from
521
      # that group
522
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
523
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
524

    
525
      if self.op.opportunistic_locking:
526
        self.opportunistic_locks[locking.LEVEL_NODE] = True
527
    else:
528
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
529
      nodelist = [self.op.pnode]
530
      if self.op.snode is not None:
531
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
532
        nodelist.append(self.op.snode)
533
      self.needed_locks[locking.LEVEL_NODE] = nodelist
534

    
535
    # in case of import lock the source node too
536
    if self.op.mode == constants.INSTANCE_IMPORT:
537
      src_node = self.op.src_node
538
      src_path = self.op.src_path
539

    
540
      if src_path is None:
541
        self.op.src_path = src_path = self.op.instance_name
542

    
543
      if src_node is None:
544
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
545
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
546
        self.op.src_node = None
547
        if os.path.isabs(src_path):
548
          raise errors.OpPrereqError("Importing an instance from a path"
549
                                     " requires a source node option",
550
                                     errors.ECODE_INVAL)
551
      else:
552
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
553
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
554
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
555
        if not os.path.isabs(src_path):
556
          self.op.src_path = src_path = \
557
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
558

    
559
    self.needed_locks[locking.LEVEL_NODE_RES] = \
560
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
561

    
562
    # Optimistically acquire shared group locks (we're reading the
563
    # configuration).  We can't just call GetInstanceNodeGroups, because the
564
    # instance doesn't exist yet. Therefore we lock all node groups of all
565
    # nodes we have.
566
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
567
      # In the case we lock all nodes for opportunistic allocation, we have no
568
      # choice than to lock all groups, because they're allocated before nodes.
569
      # This is sad, but true. At least we release all those we don't need in
570
      # CheckPrereq later.
571
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
572
    else:
573
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
574
        list(self.cfg.GetNodeGroupsFromNodes(
575
          self.needed_locks[locking.LEVEL_NODE]))
576
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
577

    
578
  def DeclareLocks(self, level):
579
    if level == locking.LEVEL_NODE_RES and \
580
      self.opportunistic_locks[locking.LEVEL_NODE]:
581
      # Even when using opportunistic locking, we require the same set of
582
      # NODE_RES locks as we got NODE locks
583
      self.needed_locks[locking.LEVEL_NODE_RES] = \
584
        self.owned_locks(locking.LEVEL_NODE)
585

    
586
  def _RunAllocator(self):
587
    """Run the allocator based on input opcode.
588

589
    """
590
    if self.op.opportunistic_locking:
591
      # Only consider nodes for which a lock is held
592
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
593
    else:
594
      node_whitelist = None
595

    
596
    #TODO Export network to iallocator so that it chooses a pnode
597
    #     in a nodegroup that has the desired network connected to
598
    req = _CreateInstanceAllocRequest(self.op, self.disks,
599
                                      self.nics, self.be_full,
600
                                      node_whitelist)
601
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
602

    
603
    ial.Run(self.op.iallocator)
604

    
605
    if not ial.success:
606
      # When opportunistic locks are used only a temporary failure is generated
607
      if self.op.opportunistic_locking:
608
        ecode = errors.ECODE_TEMP_NORES
609
      else:
610
        ecode = errors.ECODE_NORES
611

    
612
      raise errors.OpPrereqError("Can't compute nodes using"
613
                                 " iallocator '%s': %s" %
614
                                 (self.op.iallocator, ial.info),
615
                                 ecode)
616

    
617
    self.op.pnode = ial.result[0]
618
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
619
                 self.op.instance_name, self.op.iallocator,
620
                 utils.CommaJoin(ial.result))
621

    
622
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
623

    
624
    if req.RequiredNodes() == 2:
625
      self.op.snode = ial.result[1]
626

    
627
  def BuildHooksEnv(self):
628
    """Build hooks env.
629

630
    This runs on master, primary and secondary nodes of the instance.
631

632
    """
633
    env = {
634
      "ADD_MODE": self.op.mode,
635
      }
636
    if self.op.mode == constants.INSTANCE_IMPORT:
637
      env["SRC_NODE"] = self.op.src_node
638
      env["SRC_PATH"] = self.op.src_path
639
      env["SRC_IMAGES"] = self.src_images
640

    
641
    env.update(BuildInstanceHookEnv(
642
      name=self.op.instance_name,
643
      primary_node=self.op.pnode,
644
      secondary_nodes=self.secondaries,
645
      status=self.op.start,
646
      os_type=self.op.os_type,
647
      minmem=self.be_full[constants.BE_MINMEM],
648
      maxmem=self.be_full[constants.BE_MAXMEM],
649
      vcpus=self.be_full[constants.BE_VCPUS],
650
      nics=NICListToTuple(self, self.nics),
651
      disk_template=self.op.disk_template,
652
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
653
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
654
             for d in self.disks],
655
      bep=self.be_full,
656
      hvp=self.hv_full,
657
      hypervisor_name=self.op.hypervisor,
658
      tags=self.op.tags,
659
      ))
660

    
661
    return env
662

    
663
  def BuildHooksNodes(self):
664
    """Build hooks nodes.
665

666
    """
667
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
668
    return nl, nl
669

    
670
  def _ReadExportInfo(self):
671
    """Reads the export information from disk.
672

673
    It will override the opcode source node and path with the actual
674
    information, if these two were not specified before.
675

676
    @return: the export information
677

678
    """
679
    assert self.op.mode == constants.INSTANCE_IMPORT
680

    
681
    src_node = self.op.src_node
682
    src_path = self.op.src_path
683

    
684
    if src_node is None:
685
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
686
      exp_list = self.rpc.call_export_list(locked_nodes)
687
      found = False
688
      for node in exp_list:
689
        if exp_list[node].fail_msg:
690
          continue
691
        if src_path in exp_list[node].payload:
692
          found = True
693
          self.op.src_node = src_node = node
694
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
695
                                                       src_path)
696
          break
697
      if not found:
698
        raise errors.OpPrereqError("No export found for relative path %s" %
699
                                   src_path, errors.ECODE_INVAL)
700

    
701
    CheckNodeOnline(self, src_node)
702
    result = self.rpc.call_export_info(src_node, src_path)
703
    result.Raise("No export or invalid export found in dir %s" % src_path)
704

    
705
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
706
    if not export_info.has_section(constants.INISECT_EXP):
707
      raise errors.ProgrammerError("Corrupted export config",
708
                                   errors.ECODE_ENVIRON)
709

    
710
    ei_version = export_info.get(constants.INISECT_EXP, "version")
711
    if (int(ei_version) != constants.EXPORT_VERSION):
712
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
713
                                 (ei_version, constants.EXPORT_VERSION),
714
                                 errors.ECODE_ENVIRON)
715
    return export_info
716

    
717
  def _ReadExportParams(self, einfo):
718
    """Use export parameters as defaults.
719

720
    In case the opcode doesn't specify (as in override) some instance
721
    parameters, then try to use them from the export information, if
722
    that declares them.
723

724
    """
725
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
726

    
727
    if self.op.disk_template is None:
728
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
729
        self.op.disk_template = einfo.get(constants.INISECT_INS,
730
                                          "disk_template")
731
        if self.op.disk_template not in constants.DISK_TEMPLATES:
732
          raise errors.OpPrereqError("Disk template specified in configuration"
733
                                     " file is not one of the allowed values:"
734
                                     " %s" %
735
                                     " ".join(constants.DISK_TEMPLATES),
736
                                     errors.ECODE_INVAL)
737
      else:
738
        raise errors.OpPrereqError("No disk template specified and the export"
739
                                   " is missing the disk_template information",
740
                                   errors.ECODE_INVAL)
741

    
742
    if not self.op.disks:
743
      disks = []
744
      # TODO: import the disk iv_name too
745
      for idx in range(constants.MAX_DISKS):
746
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
747
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
748
          disks.append({constants.IDISK_SIZE: disk_sz})
749
      self.op.disks = disks
750
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
751
        raise errors.OpPrereqError("No disk info specified and the export"
752
                                   " is missing the disk information",
753
                                   errors.ECODE_INVAL)
754

    
755
    if not self.op.nics:
756
      nics = []
757
      for idx in range(constants.MAX_NICS):
758
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
759
          ndict = {}
760
          for name in [constants.INIC_IP, constants.INIC_MAC]:
761
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
762
            ndict[name] = v
763
          network = einfo.get(constants.INISECT_INS,
764
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
765
          # in case network is given link and mode are inherited
766
          # from nodegroup's netparams and thus should not be passed here
767
          if network:
768
            ndict[constants.INIC_NETWORK] = network
769
          else:
770
            for name in list(constants.NICS_PARAMETERS):
771
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
772
              ndict[name] = v
773
          nics.append(ndict)
774
        else:
775
          break
776
      self.op.nics = nics
777

    
778
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
779
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
780

    
781
    if (self.op.hypervisor is None and
782
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
783
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
784

    
785
    if einfo.has_section(constants.INISECT_HYP):
786
      # use the export parameters but do not override the ones
787
      # specified by the user
788
      for name, value in einfo.items(constants.INISECT_HYP):
789
        if name not in self.op.hvparams:
790
          self.op.hvparams[name] = value
791

    
792
    if einfo.has_section(constants.INISECT_BEP):
793
      # use the parameters, without overriding
794
      for name, value in einfo.items(constants.INISECT_BEP):
795
        if name not in self.op.beparams:
796
          self.op.beparams[name] = value
797
        # Compatibility for the old "memory" be param
798
        if name == constants.BE_MEMORY:
799
          if constants.BE_MAXMEM not in self.op.beparams:
800
            self.op.beparams[constants.BE_MAXMEM] = value
801
          if constants.BE_MINMEM not in self.op.beparams:
802
            self.op.beparams[constants.BE_MINMEM] = value
803
    else:
804
      # try to read the parameters old style, from the main section
805
      for name in constants.BES_PARAMETERS:
806
        if (name not in self.op.beparams and
807
            einfo.has_option(constants.INISECT_INS, name)):
808
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
809

    
810
    if einfo.has_section(constants.INISECT_OSP):
811
      # use the parameters, without overriding
812
      for name, value in einfo.items(constants.INISECT_OSP):
813
        if name not in self.op.osparams:
814
          self.op.osparams[name] = value
815

    
816
  def _RevertToDefaults(self, cluster):
817
    """Revert the instance parameters to the default values.
818

819
    """
820
    # hvparams
821
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
822
    for name in self.op.hvparams.keys():
823
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
824
        del self.op.hvparams[name]
825
    # beparams
826
    be_defs = cluster.SimpleFillBE({})
827
    for name in self.op.beparams.keys():
828
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
829
        del self.op.beparams[name]
830
    # nic params
831
    nic_defs = cluster.SimpleFillNIC({})
832
    for nic in self.op.nics:
833
      for name in constants.NICS_PARAMETERS:
834
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
835
          del nic[name]
836
    # osparams
837
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
838
    for name in self.op.osparams.keys():
839
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
840
        del self.op.osparams[name]
841

    
842
  def _CalculateFileStorageDir(self):
843
    """Calculate final instance file storage dir.
844

845
    """
846
    # file storage dir calculation/check
847
    self.instance_file_storage_dir = None
848
    if self.op.disk_template in constants.DTS_FILEBASED:
849
      # build the full file storage dir path
850
      joinargs = []
851

    
852
      if self.op.disk_template == constants.DT_SHARED_FILE:
853
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
854
      else:
855
        get_fsd_fn = self.cfg.GetFileStorageDir
856

    
857
      cfg_storagedir = get_fsd_fn()
858
      if not cfg_storagedir:
859
        raise errors.OpPrereqError("Cluster file storage dir not defined",
860
                                   errors.ECODE_STATE)
861
      joinargs.append(cfg_storagedir)
862

    
863
      if self.op.file_storage_dir is not None:
864
        joinargs.append(self.op.file_storage_dir)
865

    
866
      joinargs.append(self.op.instance_name)
867

    
868
      # pylint: disable=W0142
869
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
870

    
871
  def CheckPrereq(self): # pylint: disable=R0914
872
    """Check prerequisites.
873

874
    """
875
    # Check that the optimistically acquired groups are correct wrt the
876
    # acquired nodes
877
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
878
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
879
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
880
    if not owned_groups.issuperset(cur_groups):
881
      raise errors.OpPrereqError("New instance %s's node groups changed since"
882
                                 " locks were acquired, current groups are"
883
                                 " are '%s', owning groups '%s'; retry the"
884
                                 " operation" %
885
                                 (self.op.instance_name,
886
                                  utils.CommaJoin(cur_groups),
887
                                  utils.CommaJoin(owned_groups)),
888
                                 errors.ECODE_STATE)
889

    
890
    self._CalculateFileStorageDir()
891

    
892
    if self.op.mode == constants.INSTANCE_IMPORT:
893
      export_info = self._ReadExportInfo()
894
      self._ReadExportParams(export_info)
895
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
896
    else:
897
      self._old_instance_name = None
898

    
899
    if (not self.cfg.GetVGName() and
900
        self.op.disk_template not in constants.DTS_NOT_LVM):
901
      raise errors.OpPrereqError("Cluster does not support lvm-based"
902
                                 " instances", errors.ECODE_STATE)
903

    
904
    if (self.op.hypervisor is None or
905
        self.op.hypervisor == constants.VALUE_AUTO):
906
      self.op.hypervisor = self.cfg.GetHypervisorType()
907

    
908
    cluster = self.cfg.GetClusterInfo()
909
    enabled_hvs = cluster.enabled_hypervisors
910
    if self.op.hypervisor not in enabled_hvs:
911
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
912
                                 " cluster (%s)" %
913
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
914
                                 errors.ECODE_STATE)
915

    
916
    # Check tag validity
917
    for tag in self.op.tags:
918
      objects.TaggableObject.ValidateTag(tag)
919

    
920
    # check hypervisor parameter syntax (locally)
921
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
922
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
923
                                      self.op.hvparams)
924
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
925
    hv_type.CheckParameterSyntax(filled_hvp)
926
    self.hv_full = filled_hvp
927
    # check that we don't specify global parameters on an instance
928
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
929
                         "instance", "cluster")
930

    
931
    # fill and remember the beparams dict
932
    self.be_full = _ComputeFullBeParams(self.op, cluster)
933

    
934
    # build os parameters
935
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
936

    
937
    # now that hvp/bep are in final format, let's reset to defaults,
938
    # if told to do so
939
    if self.op.identify_defaults:
940
      self._RevertToDefaults(cluster)
941

    
942
    # NIC buildup
943
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
944
                             self.proc.GetECId())
945

    
946
    # disk checks/pre-build
947
    default_vg = self.cfg.GetVGName()
948
    self.disks = ComputeDisks(self.op, default_vg)
949

    
950
    if self.op.mode == constants.INSTANCE_IMPORT:
951
      disk_images = []
952
      for idx in range(len(self.disks)):
953
        option = "disk%d_dump" % idx
954
        if export_info.has_option(constants.INISECT_INS, option):
955
          # FIXME: are the old os-es, disk sizes, etc. useful?
956
          export_name = export_info.get(constants.INISECT_INS, option)
957
          image = utils.PathJoin(self.op.src_path, export_name)
958
          disk_images.append(image)
959
        else:
960
          disk_images.append(False)
961

    
962
      self.src_images = disk_images
963

    
964
      if self.op.instance_name == self._old_instance_name:
965
        for idx, nic in enumerate(self.nics):
966
          if nic.mac == constants.VALUE_AUTO:
967
            nic_mac_ini = "nic%d_mac" % idx
968
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
969

    
970
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
971

    
972
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
973
    if self.op.ip_check:
974
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
975
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
976
                                   (self.check_ip, self.op.instance_name),
977
                                   errors.ECODE_NOTUNIQUE)
978

    
979
    #### mac address generation
980
    # By generating here the mac address both the allocator and the hooks get
981
    # the real final mac address rather than the 'auto' or 'generate' value.
982
    # There is a race condition between the generation and the instance object
983
    # creation, which means that we know the mac is valid now, but we're not
984
    # sure it will be when we actually add the instance. If things go bad
985
    # adding the instance will abort because of a duplicate mac, and the
986
    # creation job will fail.
987
    for nic in self.nics:
988
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
989
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
990

    
991
    #### allocator run
992

    
993
    if self.op.iallocator is not None:
994
      self._RunAllocator()
995

    
996
    # Release all unneeded node locks
997
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
998
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
999
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
1000
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
1001
    # Release all unneeded group locks
1002
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
1003
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
1004

    
1005
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1006
            self.owned_locks(locking.LEVEL_NODE_RES)), \
1007
      "Node locks differ from node resource locks"
1008

    
1009
    #### node related checks
1010

    
1011
    # check primary node
1012
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
1013
    assert self.pnode is not None, \
1014
      "Cannot retrieve locked node %s" % self.op.pnode
1015
    if pnode.offline:
1016
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1017
                                 pnode.name, errors.ECODE_STATE)
1018
    if pnode.drained:
1019
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1020
                                 pnode.name, errors.ECODE_STATE)
1021
    if not pnode.vm_capable:
1022
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1023
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
1024

    
1025
    self.secondaries = []
1026

    
1027
    # Fill in any IPs from IP pools. This must happen here, because we need to
1028
    # know the nic's primary node, as specified by the iallocator
1029
    for idx, nic in enumerate(self.nics):
1030
      net_uuid = nic.network
1031
      if net_uuid is not None:
1032
        nobj = self.cfg.GetNetwork(net_uuid)
1033
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
1034
        if netparams is None:
1035
          raise errors.OpPrereqError("No netparams found for network"
1036
                                     " %s. Propably not connected to"
1037
                                     " node's %s nodegroup" %
1038
                                     (nobj.name, self.pnode.name),
1039
                                     errors.ECODE_INVAL)
1040
        self.LogInfo("NIC/%d inherits netparams %s" %
1041
                     (idx, netparams.values()))
1042
        nic.nicparams = dict(netparams)
1043
        if nic.ip is not None:
1044
          if nic.ip.lower() == constants.NIC_IP_POOL:
1045
            try:
1046
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1047
            except errors.ReservationError:
1048
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1049
                                         " from the address pool" % idx,
1050
                                         errors.ECODE_STATE)
1051
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1052
          else:
1053
            try:
1054
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
1055
            except errors.ReservationError:
1056
              raise errors.OpPrereqError("IP address %s already in use"
1057
                                         " or does not belong to network %s" %
1058
                                         (nic.ip, nobj.name),
1059
                                         errors.ECODE_NOTUNIQUE)
1060

    
1061
      # net is None, ip None or given
1062
      elif self.op.conflicts_check:
1063
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)
1064

    
1065
    # mirror node verification
1066
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1067
      if self.op.snode == pnode.name:
1068
        raise errors.OpPrereqError("The secondary node cannot be the"
1069
                                   " primary node", errors.ECODE_INVAL)
1070
      CheckNodeOnline(self, self.op.snode)
1071
      CheckNodeNotDrained(self, self.op.snode)
1072
      CheckNodeVmCapable(self, self.op.snode)
1073
      self.secondaries.append(self.op.snode)
1074

    
1075
      snode = self.cfg.GetNodeInfo(self.op.snode)
1076
      if pnode.group != snode.group:
1077
        self.LogWarning("The primary and secondary nodes are in two"
1078
                        " different node groups; the disk parameters"
1079
                        " from the first disk's node group will be"
1080
                        " used")
1081

    
1082
    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1083
      nodes = [pnode]
1084
      if self.op.disk_template in constants.DTS_INT_MIRROR:
1085
        nodes.append(snode)
1086
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1087
      if compat.any(map(has_es, nodes)):
1088
        raise errors.OpPrereqError("Disk template %s not supported with"
1089
                                   " exclusive storage" % self.op.disk_template,
1090
                                   errors.ECODE_STATE)
1091

    
1092
    nodenames = [pnode.name] + self.secondaries
1093

    
1094
    if not self.adopt_disks:
1095
      if self.op.disk_template == constants.DT_RBD:
1096
        # _CheckRADOSFreeSpace() is just a placeholder.
1097
        # Any function that checks prerequisites can be placed here.
1098
        # Check if there is enough space on the RADOS cluster.
1099
        CheckRADOSFreeSpace()
1100
      elif self.op.disk_template == constants.DT_EXT:
1101
        # FIXME: Function that checks prereqs if needed
1102
        pass
1103
      else:
1104
        # Check lv size requirements, if not adopting
1105
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1106
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
1107

    
1108
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1109
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1110
                                disk[constants.IDISK_ADOPT])
1111
                     for disk in self.disks])
1112
      if len(all_lvs) != len(self.disks):
1113
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1114
                                   errors.ECODE_INVAL)
1115
      for lv_name in all_lvs:
1116
        try:
1117
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1118
          # to ReserveLV uses the same syntax
1119
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1120
        except errors.ReservationError:
1121
          raise errors.OpPrereqError("LV named %s used by another instance" %
1122
                                     lv_name, errors.ECODE_NOTUNIQUE)
1123

    
1124
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
1125
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1126

    
1127
      node_lvs = self.rpc.call_lv_list([pnode.name],
1128
                                       vg_names.payload.keys())[pnode.name]
1129
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1130
      node_lvs = node_lvs.payload
1131

    
1132
      delta = all_lvs.difference(node_lvs.keys())
1133
      if delta:
1134
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1135
                                   utils.CommaJoin(delta),
1136
                                   errors.ECODE_INVAL)
1137
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1138
      if online_lvs:
1139
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1140
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1141
                                   errors.ECODE_STATE)
1142
      # update the size of disk based on what is found
1143
      for dsk in self.disks:
1144
        dsk[constants.IDISK_SIZE] = \
1145
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1146
                                        dsk[constants.IDISK_ADOPT])][0]))
1147

    
1148
    elif self.op.disk_template == constants.DT_BLOCK:
1149
      # Normalize and de-duplicate device paths
1150
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1151
                       for disk in self.disks])
1152
      if len(all_disks) != len(self.disks):
1153
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1154
                                   errors.ECODE_INVAL)
1155
      baddisks = [d for d in all_disks
1156
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1157
      if baddisks:
1158
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1159
                                   " cannot be adopted" %
1160
                                   (utils.CommaJoin(baddisks),
1161
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1162
                                   errors.ECODE_INVAL)
1163

    
1164
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
1165
                                            list(all_disks))[pnode.name]
1166
      node_disks.Raise("Cannot get block device information from node %s" %
1167
                       pnode.name)
1168
      node_disks = node_disks.payload
1169
      delta = all_disks.difference(node_disks.keys())
1170
      if delta:
1171
        raise errors.OpPrereqError("Missing block device(s): %s" %
1172
                                   utils.CommaJoin(delta),
1173
                                   errors.ECODE_INVAL)
1174
      for dsk in self.disks:
1175
        dsk[constants.IDISK_SIZE] = \
1176
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1177

    
1178
    # Verify instance specs
1179
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1180
    ispec = {
1181
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1182
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1183
      constants.ISPEC_DISK_COUNT: len(self.disks),
1184
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1185
                                  for disk in self.disks],
1186
      constants.ISPEC_NIC_COUNT: len(self.nics),
1187
      constants.ISPEC_SPINDLE_USE: spindle_use,
1188
      }
1189

    
1190
    group_info = self.cfg.GetNodeGroup(pnode.group)
1191
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1192
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1193
                                               self.op.disk_template)
1194
    if not self.op.ignore_ipolicy and res:
1195
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1196
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1197
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1198

    
1199
    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
1200

    
1201
    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
1202
    # check OS parameters (remotely)
1203
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
1204

    
1205
    CheckNicsBridgesExist(self, self.nics, self.pnode.name)
1206

    
1207
    #TODO: _CheckExtParams (remotely)
1208
    # Check parameters for extstorage
1209

    
1210
    # memory check on primary node
1211
    #TODO(dynmem): use MINMEM for checking
1212
    if self.op.start:
1213
      CheckNodeFreeMemory(self, self.pnode.name,
1214
                          "creating instance %s" % self.op.instance_name,
1215
                          self.be_full[constants.BE_MAXMEM],
1216
                          self.op.hypervisor)
1217

    
1218
    self.dry_run_result = list(nodenames)
1219

    
1220
  def Exec(self, feedback_fn):
1221
    """Create and add the instance to the cluster.
1222

1223
    """
1224
    instance = self.op.instance_name
1225
    pnode_name = self.pnode.name
1226

    
1227
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1228
                self.owned_locks(locking.LEVEL_NODE)), \
1229
      "Node locks differ from node resource locks"
1230
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1231

    
1232
    ht_kind = self.op.hypervisor
1233
    if ht_kind in constants.HTS_REQ_PORT:
1234
      network_port = self.cfg.AllocatePort()
1235
    else:
1236
      network_port = None
1237

    
1238
    # This is ugly but we got a chicken-egg problem here
1239
    # We can only take the group disk parameters, as the instance
1240
    # has no disks yet (we are generating them right here).
1241
    node = self.cfg.GetNodeInfo(pnode_name)
1242
    nodegroup = self.cfg.GetNodeGroup(node.group)
1243
    disks = GenerateDiskTemplate(self,
1244
                                 self.op.disk_template,
1245
                                 instance, pnode_name,
1246
                                 self.secondaries,
1247
                                 self.disks,
1248
                                 self.instance_file_storage_dir,
1249
                                 self.op.file_driver,
1250
                                 0,
1251
                                 feedback_fn,
1252
                                 self.cfg.GetGroupDiskParams(nodegroup))
1253

    
1254
    iobj = objects.Instance(name=instance, os=self.op.os_type,
1255
                            primary_node=pnode_name,
1256
                            nics=self.nics, disks=disks,
1257
                            disk_template=self.op.disk_template,
1258
                            disks_active=False,
1259
                            admin_state=constants.ADMINST_DOWN,
1260
                            network_port=network_port,
1261
                            beparams=self.op.beparams,
1262
                            hvparams=self.op.hvparams,
1263
                            hypervisor=self.op.hypervisor,
1264
                            osparams=self.op.osparams,
1265
                            )
1266

    
1267
    if self.op.tags:
1268
      for tag in self.op.tags:
1269
        iobj.AddTag(tag)
1270

    
1271
    if self.adopt_disks:
1272
      if self.op.disk_template == constants.DT_PLAIN:
1273
        # rename LVs to the newly-generated names; we need to construct
1274
        # 'fake' LV disks with the old data, plus the new unique_id
1275
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1276
        rename_to = []
1277
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1278
          rename_to.append(t_dsk.logical_id)
1279
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1280
          self.cfg.SetDiskID(t_dsk, pnode_name)
1281
        result = self.rpc.call_blockdev_rename(pnode_name,
1282
                                               zip(tmp_disks, rename_to))
1283
        result.Raise("Failed to rename adoped LVs")
1284
    else:
1285
      feedback_fn("* creating instance disks...")
1286
      try:
1287
        CreateDisks(self, iobj)
1288
      except errors.OpExecError:
1289
        self.LogWarning("Device creation failed")
1290
        self.cfg.ReleaseDRBDMinors(instance)
1291
        raise
1292

    
1293
    feedback_fn("adding instance %s to cluster config" % instance)
1294

    
1295
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1296

    
1297
    # Declare that we don't want to remove the instance lock anymore, as we've
1298
    # added the instance to the config
1299
    del self.remove_locks[locking.LEVEL_INSTANCE]
1300

    
1301
    if self.op.mode == constants.INSTANCE_IMPORT:
1302
      # Release unused nodes
1303
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
1304
    else:
1305
      # Release all nodes
1306
      ReleaseLocks(self, locking.LEVEL_NODE)
1307

    
1308
    disk_abort = False
1309
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1310
      feedback_fn("* wiping instance disks...")
1311
      try:
1312
        WipeDisks(self, iobj)
1313
      except errors.OpExecError, err:
1314
        logging.exception("Wiping disks failed")
1315
        self.LogWarning("Wiping instance disks failed (%s)", err)
1316
        disk_abort = True
1317

    
1318
    if disk_abort:
1319
      # Something is already wrong with the disks, don't do anything else
1320
      pass
1321
    elif self.op.wait_for_sync:
1322
      disk_abort = not WaitForSync(self, iobj)
1323
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1324
      # make sure the disks are not degraded (still sync-ing is ok)
1325
      feedback_fn("* checking mirrors status")
1326
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1327
    else:
1328
      disk_abort = False
1329

    
1330
    if disk_abort:
1331
      RemoveDisks(self, iobj)
1332
      self.cfg.RemoveInstance(iobj.name)
1333
      # Make sure the instance lock gets removed
1334
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1335
      raise errors.OpExecError("There are some degraded disks for"
1336
                               " this instance")
1337

    
1338
    # instance disks are now active
1339
    iobj.disks_active = True
1340

    
1341
    # Release all node resource locks
1342
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1343

    
1344
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1345
      # we need to set the disk IDs to the primary node, since the
1346
      # preceding code might or might not have done it, depending on
1347
      # disk template and other options
1348
      for disk in iobj.disks:
1349
        self.cfg.SetDiskID(disk, pnode_name)
1350
      if self.op.mode == constants.INSTANCE_CREATE:
1351
        if not self.op.no_install:
1352
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1353
                        not self.op.wait_for_sync)
1354
          if pause_sync:
1355
            feedback_fn("* pausing disk sync to install instance OS")
1356
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1357
                                                              (iobj.disks,
1358
                                                               iobj), True)
1359
            for idx, success in enumerate(result.payload):
1360
              if not success:
1361
                logging.warn("pause-sync of instance %s for disk %d failed",
1362
                             instance, idx)
1363

    
1364
          feedback_fn("* running the instance OS create scripts...")
1365
          # FIXME: pass debug option from opcode to backend
1366
          os_add_result = \
1367
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1368
                                          self.op.debug_level)
1369
          if pause_sync:
1370
            feedback_fn("* resuming disk sync")
1371
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1372
                                                              (iobj.disks,
1373
                                                               iobj), False)
1374
            for idx, success in enumerate(result.payload):
1375
              if not success:
1376
                logging.warn("resume-sync of instance %s for disk %d failed",
1377
                             instance, idx)
1378

    
1379
          os_add_result.Raise("Could not add os for instance %s"
1380
                              " on node %s" % (instance, pnode_name))
1381

    
1382
      else:
1383
        if self.op.mode == constants.INSTANCE_IMPORT:
1384
          feedback_fn("* running the instance OS import scripts...")
1385

    
1386
          transfers = []
1387

    
1388
          for idx, image in enumerate(self.src_images):
1389
            if not image:
1390
              continue
1391

    
1392
            # FIXME: pass debug option from opcode to backend
1393
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1394
                                               constants.IEIO_FILE, (image, ),
1395
                                               constants.IEIO_SCRIPT,
1396
                                               (iobj.disks[idx], idx),
1397
                                               None)
1398
            transfers.append(dt)
1399

    
1400
          import_result = \
1401
            masterd.instance.TransferInstanceData(self, feedback_fn,
1402
                                                  self.op.src_node, pnode_name,
1403
                                                  self.pnode.secondary_ip,
1404
                                                  iobj, transfers)
1405
          if not compat.all(import_result):
1406
            self.LogWarning("Some disks for instance %s on node %s were not"
1407
                            " imported successfully" % (instance, pnode_name))
1408

    
1409
          rename_from = self._old_instance_name
1410

    
1411
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1412
          feedback_fn("* preparing remote import...")
1413
          # The source cluster will stop the instance before attempting to make
1414
          # a connection. In some cases stopping an instance can take a long
1415
          # time, hence the shutdown timeout is added to the connection
1416
          # timeout.
1417
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1418
                             self.op.source_shutdown_timeout)
1419
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1420

    
1421
          assert iobj.primary_node == self.pnode.name
1422
          disk_results = \
1423
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1424
                                          self.source_x509_ca,
1425
                                          self._cds, timeouts)
1426
          if not compat.all(disk_results):
1427
            # TODO: Should the instance still be started, even if some disks
1428
            # failed to import (valid for local imports, too)?
1429
            self.LogWarning("Some disks for instance %s on node %s were not"
1430
                            " imported successfully" % (instance, pnode_name))
1431

    
1432
          rename_from = self.source_instance_name
1433

    
1434
        else:
1435
          # also checked in the prereq part
1436
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1437
                                       % self.op.mode)
1438

    
1439
        # Run rename script on newly imported instance
1440
        assert iobj.name == instance
1441
        feedback_fn("Running rename script for %s" % instance)
1442
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1443
                                                   rename_from,
1444
                                                   self.op.debug_level)
1445
        if result.fail_msg:
1446
          self.LogWarning("Failed to run rename script for %s on node"
1447
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
1448

    
1449
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1450

    
1451
    if self.op.start:
1452
      iobj.admin_state = constants.ADMINST_UP
1453
      self.cfg.Update(iobj, feedback_fn)
1454
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1455
      feedback_fn("* starting instance...")
1456
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1457
                                            False, self.op.reason)
1458
      result.Raise("Could not start instance")
1459

    
1460
    return list(iobj.all_nodes)
1461

    
1462

    
1463
class LUInstanceRename(LogicalUnit):
1464
  """Rename an instance.
1465

1466
  """
1467
  HPATH = "instance-rename"
1468
  HTYPE = constants.HTYPE_INSTANCE
1469

    
1470
  def CheckArguments(self):
1471
    """Check arguments.
1472

1473
    """
1474
    if self.op.ip_check and not self.op.name_check:
1475
      # TODO: make the ip check more flexible and not depend on the name check
1476
      raise errors.OpPrereqError("IP address check requires a name check",
1477
                                 errors.ECODE_INVAL)
1478

    
1479
  def BuildHooksEnv(self):
1480
    """Build hooks env.
1481

1482
    This runs on master, primary and secondary nodes of the instance.
1483

1484
    """
1485
    env = BuildInstanceHookEnvByObject(self, self.instance)
1486
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1487
    return env
1488

    
1489
  def BuildHooksNodes(self):
1490
    """Build hooks nodes.
1491

1492
    """
1493
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1494
    return (nl, nl)
1495

    
1496
  def CheckPrereq(self):
1497
    """Check prerequisites.
1498

1499
    This checks that the instance is in the cluster and is not running.
1500

1501
    """
1502
    self.op.instance_name = ExpandInstanceName(self.cfg,
1503
                                               self.op.instance_name)
1504
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1505
    assert instance is not None
1506
    CheckNodeOnline(self, instance.primary_node)
1507
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1508
                       msg="cannot rename")
1509
    self.instance = instance
1510

    
1511
    new_name = self.op.new_name
1512
    if self.op.name_check:
1513
      hostname = _CheckHostnameSane(self, new_name)
1514
      new_name = self.op.new_name = hostname.name
1515
      if (self.op.ip_check and
1516
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1517
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1518
                                   (hostname.ip, new_name),
1519
                                   errors.ECODE_NOTUNIQUE)
1520

    
1521
    instance_list = self.cfg.GetInstanceList()
1522
    if new_name in instance_list and new_name != instance.name:
1523
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1524
                                 new_name, errors.ECODE_EXISTS)
1525

    
1526
  def Exec(self, feedback_fn):
1527
    """Rename the instance.
1528

1529
    """
1530
    inst = self.instance
1531
    old_name = inst.name
1532

    
1533
    rename_file_storage = False
1534
    if (inst.disk_template in constants.DTS_FILEBASED and
1535
        self.op.new_name != inst.name):
1536
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1537
      rename_file_storage = True
1538

    
1539
    self.cfg.RenameInstance(inst.name, self.op.new_name)
1540
    # Change the instance lock. This is definitely safe while we hold the BGL.
1541
    # Otherwise the new lock would have to be added in acquired mode.
1542
    assert self.REQ_BGL
1543
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1544
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1545
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1546

    
1547
    # re-read the instance from the configuration after rename
1548
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
1549

    
1550
    if rename_file_storage:
1551
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1552
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1553
                                                     old_file_storage_dir,
1554
                                                     new_file_storage_dir)
1555
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1556
                   " (but the instance has been renamed in Ganeti)" %
1557
                   (inst.primary_node, old_file_storage_dir,
1558
                    new_file_storage_dir))
1559

    
1560
    StartInstanceDisks(self, inst, None)
1561
    # update info on disks
1562
    info = GetInstanceInfoText(inst)
1563
    for (idx, disk) in enumerate(inst.disks):
1564
      for node in inst.all_nodes:
1565
        self.cfg.SetDiskID(disk, node)
1566
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
1567
        if result.fail_msg:
1568
          self.LogWarning("Error setting info on node %s for disk %s: %s",
1569
                          node, idx, result.fail_msg)
1570
    try:
1571
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1572
                                                 old_name, self.op.debug_level)
1573
      msg = result.fail_msg
1574
      if msg:
1575
        msg = ("Could not run OS rename script for instance %s on node %s"
1576
               " (but the instance has been renamed in Ganeti): %s" %
1577
               (inst.name, inst.primary_node, msg))
1578
        self.LogWarning(msg)
1579
    finally:
1580
      ShutdownInstanceDisks(self, inst)
1581

    
1582
    return inst.name
1583

    
1584

    
1585
class LUInstanceRemove(LogicalUnit):
1586
  """Remove an instance.
1587

1588
  """
1589
  HPATH = "instance-remove"
1590
  HTYPE = constants.HTYPE_INSTANCE
1591
  REQ_BGL = False
1592

    
1593
  def ExpandNames(self):
1594
    self._ExpandAndLockInstance()
1595
    self.needed_locks[locking.LEVEL_NODE] = []
1596
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1597
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1598

    
1599
  def DeclareLocks(self, level):
1600
    if level == locking.LEVEL_NODE:
1601
      self._LockInstancesNodes()
1602
    elif level == locking.LEVEL_NODE_RES:
1603
      # Copy node locks
1604
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1605
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1606

    
1607
  def BuildHooksEnv(self):
1608
    """Build hooks env.
1609

1610
    This runs on master, primary and secondary nodes of the instance.
1611

1612
    """
1613
    env = BuildInstanceHookEnvByObject(self, self.instance)
1614
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1615
    return env
1616

    
1617
  def BuildHooksNodes(self):
1618
    """Build hooks nodes.
1619

1620
    """
1621
    nl = [self.cfg.GetMasterNode()]
1622
    nl_post = list(self.instance.all_nodes) + nl
1623
    return (nl, nl_post)
1624

    
1625
  def CheckPrereq(self):
1626
    """Check prerequisites.
1627

1628
    This checks that the instance is in the cluster.
1629

1630
    """
1631
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1632
    assert self.instance is not None, \
1633
      "Cannot retrieve locked instance %s" % self.op.instance_name
1634

    
1635
  def Exec(self, feedback_fn):
1636
    """Remove the instance.
1637

1638
    """
1639
    instance = self.instance
1640
    logging.info("Shutting down instance %s on node %s",
1641
                 instance.name, instance.primary_node)
1642

    
1643
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1644
                                             self.op.shutdown_timeout,
1645
                                             self.op.reason)
1646
    msg = result.fail_msg
1647
    if msg:
1648
      if self.op.ignore_failures:
1649
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
1650
      else:
1651
        raise errors.OpExecError("Could not shutdown instance %s on"
1652
                                 " node %s: %s" %
1653
                                 (instance.name, instance.primary_node, msg))
1654

    
1655
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1656
            self.owned_locks(locking.LEVEL_NODE_RES))
1657
    assert not (set(instance.all_nodes) -
1658
                self.owned_locks(locking.LEVEL_NODE)), \
1659
      "Not owning correct locks"
1660

    
1661
    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1662

    
1663

    
1664
class LUInstanceMove(LogicalUnit):
1665
  """Move an instance by data-copying.
1666

1667
  """
1668
  HPATH = "instance-move"
1669
  HTYPE = constants.HTYPE_INSTANCE
1670
  REQ_BGL = False
1671

    
1672
  def ExpandNames(self):
1673
    self._ExpandAndLockInstance()
1674
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
1675
    self.op.target_node = target_node
1676
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
1677
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1678
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1679

    
1680
  def DeclareLocks(self, level):
1681
    if level == locking.LEVEL_NODE:
1682
      self._LockInstancesNodes(primary_only=True)
1683
    elif level == locking.LEVEL_NODE_RES:
1684
      # Copy node locks
1685
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1686
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1687

    
1688
  def BuildHooksEnv(self):
1689
    """Build hooks env.
1690

1691
    This runs on master, primary and secondary nodes of the instance.
1692

1693
    """
1694
    env = {
1695
      "TARGET_NODE": self.op.target_node,
1696
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1697
      }
1698
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1699
    return env
1700

    
1701
  def BuildHooksNodes(self):
1702
    """Build hooks nodes.
1703

1704
    """
1705
    nl = [
1706
      self.cfg.GetMasterNode(),
1707
      self.instance.primary_node,
1708
      self.op.target_node,
1709
      ]
1710
    return (nl, nl)
1711

    
1712
  def CheckPrereq(self):
1713
    """Check prerequisites.
1714

1715
    This checks that the instance is in the cluster.
1716

1717
    """
1718
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1719
    assert self.instance is not None, \
1720
      "Cannot retrieve locked instance %s" % self.op.instance_name
1721

    
1722
    if instance.disk_template not in constants.DTS_COPYABLE:
1723
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1724
                                 instance.disk_template, errors.ECODE_STATE)
1725

    
1726
    node = self.cfg.GetNodeInfo(self.op.target_node)
1727
    assert node is not None, \
1728
      "Cannot retrieve locked node %s" % self.op.target_node
1729

    
1730
    self.target_node = target_node = node.name
1731

    
1732
    if target_node == instance.primary_node:
1733
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1734
                                 (instance.name, target_node),
1735
                                 errors.ECODE_STATE)
1736

    
1737
    bep = self.cfg.GetClusterInfo().FillBE(instance)
1738

    
1739
    for idx, dsk in enumerate(instance.disks):
1740
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1741
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1742
                                   " cannot copy" % idx, errors.ECODE_STATE)
1743

    
1744
    CheckNodeOnline(self, target_node)
1745
    CheckNodeNotDrained(self, target_node)
1746
    CheckNodeVmCapable(self, target_node)
1747
    cluster = self.cfg.GetClusterInfo()
1748
    group_info = self.cfg.GetNodeGroup(node.group)
1749
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1750
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1751
                           ignore=self.op.ignore_ipolicy)
1752

    
1753
    if instance.admin_state == constants.ADMINST_UP:
1754
      # check memory requirements on the target node
1755
      CheckNodeFreeMemory(self, target_node,
1756
                          "failing over instance %s" %
1757
                          instance.name, bep[constants.BE_MAXMEM],
1758
                          instance.hypervisor)
1759
    else:
1760
      self.LogInfo("Not checking memory on the secondary node as"
1761
                   " instance will not be started")
1762

    
1763
    # check bridge existence
1764
    CheckInstanceBridgesExist(self, instance, node=target_node)
1765

    
1766
  def Exec(self, feedback_fn):
1767
    """Move an instance.
1768

1769
    The move is done by shutting it down on its present node, copying
1770
    the data over (slow) and starting it on the new node.
1771

1772
    """
1773
    instance = self.instance
1774

    
1775
    source_node = instance.primary_node
1776
    target_node = self.target_node
1777

    
1778
    self.LogInfo("Shutting down instance %s on source node %s",
1779
                 instance.name, source_node)
1780

    
1781
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1782
            self.owned_locks(locking.LEVEL_NODE_RES))
1783

    
1784
    result = self.rpc.call_instance_shutdown(source_node, instance,
1785
                                             self.op.shutdown_timeout,
1786
                                             self.op.reason)
1787
    msg = result.fail_msg
1788
    if msg:
1789
      if self.op.ignore_consistency:
1790
        self.LogWarning("Could not shutdown instance %s on node %s."
1791
                        " Proceeding anyway. Please make sure node"
1792
                        " %s is down. Error details: %s",
1793
                        instance.name, source_node, source_node, msg)
1794
      else:
1795
        raise errors.OpExecError("Could not shutdown instance %s on"
1796
                                 " node %s: %s" %
1797
                                 (instance.name, source_node, msg))
1798

    
1799
    # create the target disks
1800
    try:
1801
      CreateDisks(self, instance, target_node=target_node)
1802
    except errors.OpExecError:
1803
      self.LogWarning("Device creation failed")
1804
      self.cfg.ReleaseDRBDMinors(instance.name)
1805
      raise
1806

    
1807
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1808

    
1809
    errs = []
1810
    # activate, get path, copy the data over
1811
    for idx, disk in enumerate(instance.disks):
1812
      self.LogInfo("Copying data for disk %d", idx)
1813
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1814
                                               instance.name, True, idx)
1815
      if result.fail_msg:
1816
        self.LogWarning("Can't assemble newly created disk %d: %s",
1817
                        idx, result.fail_msg)
1818
        errs.append(result.fail_msg)
1819
        break
1820
      dev_path = result.payload
1821
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1822
                                             target_node, dev_path,
1823
                                             cluster_name)
1824
      if result.fail_msg:
1825
        self.LogWarning("Can't copy data over for disk %d: %s",
1826
                        idx, result.fail_msg)
1827
        errs.append(result.fail_msg)
1828
        break
1829

    
1830
    if errs:
1831
      self.LogWarning("Some disks failed to copy, aborting")
1832
      try:
1833
        RemoveDisks(self, instance, target_node=target_node)
1834
      finally:
1835
        self.cfg.ReleaseDRBDMinors(instance.name)
1836
        raise errors.OpExecError("Errors during disk copy: %s" %
1837
                                 (",".join(errs),))
1838

    
1839
    instance.primary_node = target_node
1840
    self.cfg.Update(instance, feedback_fn)
1841

    
1842
    self.LogInfo("Removing the disks on the original node")
1843
    RemoveDisks(self, instance, target_node=source_node)
1844

    
1845
    # Only start the instance if it's marked as up
1846
    if instance.admin_state == constants.ADMINST_UP:
1847
      self.LogInfo("Starting instance %s on node %s",
1848
                   instance.name, target_node)
1849

    
1850
      disks_ok, _ = AssembleInstanceDisks(self, instance,
1851
                                          ignore_secondaries=True)
1852
      if not disks_ok:
1853
        ShutdownInstanceDisks(self, instance)
1854
        raise errors.OpExecError("Can't activate the instance's disks")
1855

    
1856
      result = self.rpc.call_instance_start(target_node,
1857
                                            (instance, None, None), False,
1858
                                            self.op.reason)
1859
      msg = result.fail_msg
1860
      if msg:
1861
        ShutdownInstanceDisks(self, instance)
1862
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1863
                                 (instance.name, target_node, msg))
1864

    
1865

    
1866
class LUInstanceMultiAlloc(NoHooksLU):
1867
  """Allocates multiple instances at the same time.
1868

1869
  """
1870
  REQ_BGL = False
1871

    
1872
  def CheckArguments(self):
1873
    """Check arguments.
1874

1875
    """
1876
    nodes = []
1877
    for inst in self.op.instances:
1878
      if inst.iallocator is not None:
1879
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1880
                                   " instance objects", errors.ECODE_INVAL)
1881
      nodes.append(bool(inst.pnode))
1882
      if inst.disk_template in constants.DTS_INT_MIRROR:
1883
        nodes.append(bool(inst.snode))
1884

    
1885
    has_nodes = compat.any(nodes)
1886
    if compat.all(nodes) ^ has_nodes:
1887
      raise errors.OpPrereqError("There are instance objects providing"
1888
                                 " pnode/snode while others do not",
1889
                                 errors.ECODE_INVAL)
1890

    
1891
    if not has_nodes and self.op.iallocator is None:
1892
      default_iallocator = self.cfg.GetDefaultIAllocator()
1893
      if default_iallocator:
1894
        self.op.iallocator = default_iallocator
1895
      else:
1896
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1897
                                   " given and no cluster-wide default"
1898
                                   " iallocator found; please specify either"
1899
                                   " an iallocator or nodes on the instances"
1900
                                   " or set a cluster-wide default iallocator",
1901
                                   errors.ECODE_INVAL)
1902

    
1903
    _CheckOpportunisticLocking(self.op)
1904

    
1905
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1906
    if dups:
1907
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1908
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1909

    
1910
  def ExpandNames(self):
1911
    """Calculate the locks.
1912

1913
    """
1914
    self.share_locks = ShareAll()
1915
    self.needed_locks = {
1916
      # iallocator will select nodes and even if no iallocator is used,
1917
      # collisions with LUInstanceCreate should be avoided
1918
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1919
      }
1920

    
1921
    if self.op.iallocator:
1922
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1923
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1924

    
1925
      if self.op.opportunistic_locking:
1926
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1927
    else:
1928
      nodeslist = []
1929
      for inst in self.op.instances:
1930
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
1931
        nodeslist.append(inst.pnode)
1932
        if inst.snode is not None:
1933
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
1934
          nodeslist.append(inst.snode)
1935

    
1936
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1937
      # Lock resources of instance's primary and secondary nodes (copy to
1938
      # prevent accidental modification)
1939
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1940

    
1941
  def DeclareLocks(self, level):
1942
    if level == locking.LEVEL_NODE_RES and \
1943
      self.opportunistic_locks[locking.LEVEL_NODE]:
1944
      # Even when using opportunistic locking, we require the same set of
1945
      # NODE_RES locks as we got NODE locks
1946
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1947
        self.owned_locks(locking.LEVEL_NODE)
1948

    
1949
  def CheckPrereq(self):
1950
    """Check prerequisite.
1951

1952
    """
1953
    if self.op.iallocator:
1954
      cluster = self.cfg.GetClusterInfo()
1955
      default_vg = self.cfg.GetVGName()
1956
      ec_id = self.proc.GetECId()
1957

    
1958
      if self.op.opportunistic_locking:
1959
        # Only consider nodes for which a lock is held
1960
        node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1961
      else:
1962
        node_whitelist = None
1963

    
1964
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1965
                                           _ComputeNics(op, cluster, None,
1966
                                                        self.cfg, ec_id),
1967
                                           _ComputeFullBeParams(op, cluster),
1968
                                           node_whitelist)
1969
               for op in self.op.instances]
1970

    
1971
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1972
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1973

    
1974
      ial.Run(self.op.iallocator)
1975

    
1976
      if not ial.success:
1977
        raise errors.OpPrereqError("Can't compute nodes using"
1978
                                   " iallocator '%s': %s" %
1979
                                   (self.op.iallocator, ial.info),
1980
                                   errors.ECODE_NORES)
1981

    
1982
      self.ia_result = ial.result
1983

    
1984
    if self.op.dry_run:
1985
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1986
        constants.JOB_IDS_KEY: [],
1987
        })
1988

    
1989
  def _ConstructPartialResult(self):
1990
    """Contructs the partial result.
1991

1992
    """
1993
    if self.op.iallocator:
1994
      (allocatable, failed_insts) = self.ia_result
1995
      allocatable_insts = map(compat.fst, allocatable)
1996
    else:
1997
      allocatable_insts = [op.instance_name for op in self.op.instances]
1998
      failed_insts = []
1999

    
2000
    return {
2001
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
2002
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
2003
      }
2004
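  # Illustrative note (not part of the original module): with the iallocator
  # placing two instances and failing a third, the partial result is a dict
  # of the form
  #   {OpInstanceMultiAlloc.ALLOCATABLE_KEY: ["inst1", "inst2"],
  #    OpInstanceMultiAlloc.FAILED_KEY: ["inst3"]}
  # (the instance names are hypothetical).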

    
2005
  def Exec(self, feedback_fn):
2006
    """Executes the opcode.
2007

2008
    """
2009
    jobs = []
2010
    if self.op.iallocator:
2011
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2012
      (allocatable, failed) = self.ia_result
2013

    
2014
      for (name, nodes) in allocatable:
2015
        op = op2inst.pop(name)
2016

    
2017
        if len(nodes) > 1:
2018
          (op.pnode, op.snode) = nodes
2019
        else:
2020
          (op.pnode,) = nodes
2021

    
2022
        jobs.append([op])
2023

    
2024
      missing = set(op2inst.keys()) - set(failed)
2025
      assert not missing, \
2026
        "Iallocator did return incomplete result: %s" % \
2027
        utils.CommaJoin(missing)
2028
    else:
2029
      jobs.extend([op] for op in self.op.instances)
2030

    
2031
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2032

    
2033

    
2034
class _InstNicModPrivate:
2035
  """Data structure for network interface modifications.
2036

2037
  Used by L{LUInstanceSetParams}.
2038

2039
  """
2040
  def __init__(self):
2041
    self.params = None
2042
    self.filled = None
2043

    
2044

    
2045
def _PrepareContainerMods(mods, private_fn):
2046
  """Prepares a list of container modifications by adding a private data field.
2047

2048
  @type mods: list of tuples; (operation, index, parameters)
2049
  @param mods: List of modifications
2050
  @type private_fn: callable or None
2051
  @param private_fn: Callable for constructing a private data field for a
2052
    modification
2053
  @rtype: list
2054

2055
  """
2056
  if private_fn is None:
2057
    fn = lambda: None
2058
  else:
2059
    fn = private_fn
2060

    
2061
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2062

    
2063
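# Illustrative sketch (not part of the original module): _PrepareContainerMods
# only attaches a per-modification private object. Assuming a single NIC
# addition and the _InstNicModPrivate class defined above, a call would look
# roughly like:
#
#   mods = [(constants.DDM_ADD, -1, {constants.INIC_MAC: "auto"})]
#   _PrepareContainerMods(mods, _InstNicModPrivate)
#   # -> [(constants.DDM_ADD, -1, {...}, <_InstNicModPrivate instance>)]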

    
2064
def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
2065
  """Checks if nodes have enough physical CPUs
2066

2067
  This function checks if all given nodes have the needed number of
2068
  physical CPUs. In case any node has less CPUs or we cannot get the
2069
  information from the node, this function raises an OpPrereqError
2070
  exception.
2071

2072
  @type lu: C{LogicalUnit}
2073
  @param lu: a logical unit from which we get configuration data
2074
  @type nodenames: C{list}
2075
  @param nodenames: the list of node names to check
2076
  @type requested: C{int}
2077
  @param requested: the minimum acceptable number of physical CPUs
2078
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2079
      or we cannot check the node
2080

2081
  """
2082
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2083
  for node in nodenames:
2084
    info = nodeinfo[node]
2085
    info.Raise("Cannot get current information from node %s" % node,
2086
               prereq=True, ecode=errors.ECODE_ENVIRON)
2087
    (_, _, (hv_info, )) = info.payload
2088
    num_cpus = hv_info.get("cpu_total", None)
2089
    if not isinstance(num_cpus, int):
2090
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2091
                                 " on node %s, result was '%s'" %
2092
                                 (node, num_cpus), errors.ECODE_ENVIRON)
2093
    if requested > num_cpus:
2094
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2095
                                 "required" % (node, num_cpus, requested),
2096
                                 errors.ECODE_NORES)
2097

    
2098
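# Illustrative sketch (not part of the original module): a caller requiring
# at least four physical CPUs on every candidate node for a given hypervisor
# could invoke the helper roughly as (node names are hypothetical):
#
#   _CheckNodesPhysicalCPUs(self, ["node1.example.com", "node2.example.com"],
#                           4, constants.HT_KVM)
#
# It raises errors.OpPrereqError if any node reports fewer CPUs or cannot be
# queried.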

    
2099
def GetItemFromContainer(identifier, kind, container):
2100
  """Return the item refered by the identifier.
2101

2102
  @type identifier: string
2103
  @param identifier: Item index or name or UUID
2104
  @type kind: string
2105
  @param kind: One-word item description
2106
  @type container: list
2107
  @param container: Container to get the item from
2108

2109
  """
2110
  # Index
2111
  try:
2112
    idx = int(identifier)
2113
    if idx == -1:
2114
      # Append
2115
      absidx = len(container) - 1
2116
    elif idx < 0:
2117
      raise IndexError("Not accepting negative indices other than -1")
2118
    elif idx > len(container):
2119
      raise IndexError("Got %s index %s, but there are only %s" %
2120
                       (kind, idx, len(container)))
2121
    else:
2122
      absidx = idx
2123
    return (absidx, container[idx])
2124
  except ValueError:
2125
    pass
2126

    
2127
  for idx, item in enumerate(container):
2128
    if item.uuid == identifier or item.name == identifier:
2129
      return (idx, item)
2130

    
2131
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2132
                             (kind, identifier), errors.ECODE_NOENT)
2133

    
2134

    
2135
def _ApplyContainerMods(kind, container, chgdesc, mods,
2136
                        create_fn, modify_fn, remove_fn):
2137
  """Applies descriptions in C{mods} to C{container}.
2138

2139
  @type kind: string
2140
  @param kind: One-word item description
2141
  @type container: list
2142
  @param container: Container to modify
2143
  @type chgdesc: None or list
2144
  @param chgdesc: List of applied changes
2145
  @type mods: list
2146
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2147
  @type create_fn: callable
2148
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2149
    receives absolute item index, parameters and private data object as added
2150
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2151
    as list
2152
  @type modify_fn: callable
2153
  @param modify_fn: Callback for modifying an existing item
2154
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2155
    and private data object as added by L{_PrepareContainerMods}, returns
2156
    changes as list
2157
  @type remove_fn: callable
2158
  @param remove_fn: Callback on removing item; receives absolute item index,
2159
    item and private data object as added by L{_PrepareContainerMods}
2160

2161
  """
2162
  for (op, identifier, params, private) in mods:
2163
    changes = None
2164

    
2165
    if op == constants.DDM_ADD:
2166
      # Calculate where item will be added
2167
      # When adding an item, identifier can only be an index
2168
      try:
2169
        idx = int(identifier)
2170
      except ValueError:
2171
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2172
                                   " identifier for %s" % constants.DDM_ADD,
2173
                                   errors.ECODE_INVAL)
2174
      if idx == -1:
2175
        addidx = len(container)
2176
      else:
2177
        if idx < 0:
2178
          raise IndexError("Not accepting negative indices other than -1")
2179
        elif idx > len(container):
2180
          raise IndexError("Got %s index %s, but there are only %s" %
2181
                           (kind, idx, len(container)))
2182
        addidx = idx
2183

    
2184
      if create_fn is None:
2185
        item = params
2186
      else:
2187
        (item, changes) = create_fn(addidx, params, private)
2188

    
2189
      if idx == -1:
2190
        container.append(item)
2191
      else:
2192
        assert idx >= 0
2193
        assert idx <= len(container)
2194
        # list.insert does so before the specified index
2195
        container.insert(idx, item)
2196
    else:
2197
      # Retrieve existing item
2198
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2199

    
2200
      if op == constants.DDM_REMOVE:
2201
        assert not params
2202

    
2203
        if remove_fn is not None:
2204
          remove_fn(absidx, item, private)
2205

    
2206
        changes = [("%s/%s" % (kind, absidx), "remove")]
2207

    
2208
        assert container[absidx] == item
2209
        del container[absidx]
2210
      elif op == constants.DDM_MODIFY:
2211
        if modify_fn is not None:
2212
          changes = modify_fn(absidx, item, params, private)
2213
      else:
2214
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2215

    
2216
    assert _TApplyContModsCbChanges(changes)
2217

    
2218
    if not (chgdesc is None or changes is None):
2219
      chgdesc.extend(changes)
2220

    
2221
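# Illustrative sketch (not part of the original module): LUInstanceSetParams
# below drives this helper with create/modify/remove callbacks. A minimal,
# hypothetical invocation that only removes a NIC could look like:
#
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, "eth1", {})], None)
#   _ApplyContainerMods("nic", instance.nics, chgdesc, mods, None, None, None)
#   # chgdesc is now [("nic/<idx>", "remove")] and the NIC has been deleted
#   # from instance.nics.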

    
2222
def _UpdateIvNames(base_index, disks):
2223
  """Updates the C{iv_name} attribute of disks.
2224

2225
  @type disks: list of L{objects.Disk}
2226

2227
  """
2228
  for (idx, disk) in enumerate(disks):
2229
    disk.iv_name = "disk/%s" % (base_index + idx, )
2230

    
2231
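# Illustrative note (not part of the original module): after removing the
# first disk of a three-disk instance, _UpdateIvNames(0, instance.disks)
# renumbers the two remaining disk objects to "disk/0" and "disk/1".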

    
2232
class LUInstanceSetParams(LogicalUnit):
2233
  """Modifies an instances's parameters.
2234

2235
  """
2236
  HPATH = "instance-modify"
2237
  HTYPE = constants.HTYPE_INSTANCE
2238
  REQ_BGL = False
2239

    
2240
  @staticmethod
2241
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2242
    assert ht.TList(mods)
2243
    assert not mods or len(mods[0]) in (2, 3)
2244

    
2245
    if mods and len(mods[0]) == 2:
2246
      result = []
2247

    
2248
      addremove = 0
2249
      for op, params in mods:
2250
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2251
          result.append((op, -1, params))
2252
          addremove += 1
2253

    
2254
          if addremove > 1:
2255
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2256
                                       " supported at a time" % kind,
2257
                                       errors.ECODE_INVAL)
2258
        else:
2259
          result.append((constants.DDM_MODIFY, op, params))
2260

    
2261
      assert verify_fn(result)
2262
    else:
2263
      result = mods
2264

    
2265
    return result
2266
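  # Illustrative note (not part of the original module): _UpgradeDiskNicMods
  # converts legacy two-element modifications into the three-element form
  # consumed by _ApplyContainerMods, e.g.
  #   [(constants.DDM_ADD, {"size": 1024})]
  #     -> [(constants.DDM_ADD, -1, {"size": 1024})]
  #   [(0, {"mode": "rw"})]
  #     -> [(constants.DDM_MODIFY, 0, {"mode": "rw"})]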

    
2267
  @staticmethod
2268
  def _CheckMods(kind, mods, key_types, item_fn):
2269
    """Ensures requested disk/NIC modifications are valid.
2270

2271
    """
2272
    for (op, _, params) in mods:
2273
      assert ht.TDict(params)
2274

    
2275
      # If 'key_types' is an empty dict, we assume we have an
2276
      # 'ext' template and thus do not ForceDictType
2277
      if key_types:
2278
        utils.ForceDictType(params, key_types)
2279

    
2280
      if op == constants.DDM_REMOVE:
2281
        if params:
2282
          raise errors.OpPrereqError("No settings should be passed when"
2283
                                     " removing a %s" % kind,
2284
                                     errors.ECODE_INVAL)
2285
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2286
        item_fn(op, params)
2287
      else:
2288
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2289

    
2290
  def _VerifyDiskModification(self, op, params):
2291
    """Verifies a disk modification.
2292

2293
    """
2294
    if op == constants.DDM_ADD:
2295
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2296
      if mode not in constants.DISK_ACCESS_SET:
2297
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2298
                                   errors.ECODE_INVAL)
2299

    
2300
      size = params.get(constants.IDISK_SIZE, None)
2301
      if size is None:
2302
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2303
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2304

    
2305
      try:
2306
        size = int(size)
2307
      except (TypeError, ValueError), err:
2308
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2309
                                   errors.ECODE_INVAL)
2310

    
2311
      params[constants.IDISK_SIZE] = size
2312
      name = params.get(constants.IDISK_NAME, None)
2313
      if name is not None and name.lower() == constants.VALUE_NONE:
2314
        params[constants.IDISK_NAME] = None
2315

    
2316
    elif op == constants.DDM_MODIFY:
2317
      if constants.IDISK_SIZE in params:
2318
        raise errors.OpPrereqError("Disk size change not possible, use"
2319
                                   " grow-disk", errors.ECODE_INVAL)
2320

    
2321
      # Disk modification supports changing only the disk name and mode.
2322
      # Changing arbitrary parameters is allowed only for ext disk template
2323
      if self.instance.disk_template != constants.DT_EXT:
2324
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2325

    
2326
      name = params.get(constants.IDISK_NAME, None)
2327
      if name is not None and name.lower() == constants.VALUE_NONE:
2328
        params[constants.IDISK_NAME] = None
2329

    
2330
  @staticmethod
2331
  def _VerifyNicModification(op, params):
2332
    """Verifies a network interface modification.
2333

2334
    """
2335
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2336
      ip = params.get(constants.INIC_IP, None)
2337
      name = params.get(constants.INIC_NAME, None)
2338
      req_net = params.get(constants.INIC_NETWORK, None)
2339
      link = params.get(constants.NIC_LINK, None)
2340
      mode = params.get(constants.NIC_MODE, None)
2341
      if name is not None and name.lower() == constants.VALUE_NONE:
2342
        params[constants.INIC_NAME] = None
2343
      if req_net is not None:
2344
        if req_net.lower() == constants.VALUE_NONE:
2345
          params[constants.INIC_NETWORK] = None
2346
          req_net = None
2347
        elif link is not None or mode is not None:
2348
          raise errors.OpPrereqError("If network is given"
2349
                                     " mode or link should not",
2350
                                     errors.ECODE_INVAL)
2351

    
2352
      if op == constants.DDM_ADD:
2353
        macaddr = params.get(constants.INIC_MAC, None)
2354
        if macaddr is None:
2355
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2356

    
2357
      if ip is not None:
2358
        if ip.lower() == constants.VALUE_NONE:
2359
          params[constants.INIC_IP] = None
2360
        else:
2361
          if ip.lower() == constants.NIC_IP_POOL:
2362
            if op == constants.DDM_ADD and req_net is None:
2363
              raise errors.OpPrereqError("If ip=pool, parameter network"
2364
                                         " cannot be none",
2365
                                         errors.ECODE_INVAL)
2366
          else:
2367
            if not netutils.IPAddress.IsValid(ip):
2368
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2369
                                         errors.ECODE_INVAL)
2370

    
2371
      if constants.INIC_MAC in params:
2372
        macaddr = params[constants.INIC_MAC]
2373
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2374
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2375

    
2376
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2377
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2378
                                     " modifying an existing NIC",
2379
                                     errors.ECODE_INVAL)
2380

    
2381
  def CheckArguments(self):
2382
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2383
            self.op.hvparams or self.op.beparams or self.op.os_name or
2384
            self.op.osparams or self.op.offline is not None or
2385
            self.op.runtime_mem or self.op.pnode):
2386
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2387

    
2388
    if self.op.hvparams:
2389
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2390
                           "hypervisor", "instance", "cluster")
2391

    
2392
    self.op.disks = self._UpgradeDiskNicMods(
2393
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2394
    self.op.nics = self._UpgradeDiskNicMods(
2395
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2396

    
2397
    if self.op.disks and self.op.disk_template is not None:
2398
      raise errors.OpPrereqError("Disk template conversion and other disk"
2399
                                 " changes not supported at the same time",
2400
                                 errors.ECODE_INVAL)
2401

    
2402
    if (self.op.disk_template and
2403
        self.op.disk_template in constants.DTS_INT_MIRROR and
2404
        self.op.remote_node is None):
2405
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2406
                                 " one requires specifying a secondary node",
2407
                                 errors.ECODE_INVAL)
2408

    
2409
    # Check NIC modifications
2410
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2411
                    self._VerifyNicModification)
2412

    
2413
    if self.op.pnode:
2414
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2415

    
2416
  def ExpandNames(self):
2417
    self._ExpandAndLockInstance()
2418
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2419
    # Can't even acquire node locks in shared mode as upcoming changes in
2420
    # Ganeti 2.6 will start to modify the node object on disk conversion
2421
    self.needed_locks[locking.LEVEL_NODE] = []
2422
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2423
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2424
    # Lock node group to look up the ipolicy
2425
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2426

    
2427
  def DeclareLocks(self, level):
2428
    if level == locking.LEVEL_NODEGROUP:
2429
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2430
      # Acquire locks for the instance's nodegroups optimistically. Needs
2431
      # to be verified in CheckPrereq
2432
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2433
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2434
    elif level == locking.LEVEL_NODE:
2435
      self._LockInstancesNodes()
2436
      if self.op.disk_template and self.op.remote_node:
2437
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2438
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2439
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2440
      # Copy node locks
2441
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2442
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2443

    
2444
  def BuildHooksEnv(self):
2445
    """Build hooks env.
2446

2447
    This runs on the master, primary and secondaries.
2448

2449
    """
2450
    args = {}
2451
    if constants.BE_MINMEM in self.be_new:
2452
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2453
    if constants.BE_MAXMEM in self.be_new:
2454
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2455
    if constants.BE_VCPUS in self.be_new:
2456
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2457
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2458
    # information at all.
2459

    
2460
    if self._new_nics is not None:
2461
      nics = []
2462

    
2463
      for nic in self._new_nics:
2464
        n = copy.deepcopy(nic)
2465
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2466
        n.nicparams = nicparams
2467
        nics.append(NICToTuple(self, n))
2468

    
2469
      args["nics"] = nics
2470

    
2471
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2472
    if self.op.disk_template:
2473
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2474
    if self.op.runtime_mem:
2475
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2476

    
2477
    return env
2478

    
2479
  def BuildHooksNodes(self):
2480
    """Build hooks nodes.
2481

2482
    """
2483
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2484
    return (nl, nl)
2485

    
2486
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

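  # _PreCheckDiskTemplate rejects unsupported template conversions (only the
  # pairs listed in _DISK_CONVERSIONS are allowed), requires the instance to
  # be down and, for conversions to mirrored templates, validates the chosen
  # secondary node (online, not drained, enough free space per VG, ipolicy).
  # Conversions to templates that do not support exclusive storage are
  # refused if exclusive storage is enabled on any node involved.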
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    instance = self.instance
    pnode = instance.primary_node
    cluster = self.cluster
    if instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 instance.disk_template, errors.ECODE_INVAL)

    if (instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node == pnode:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node)
      CheckNodeNotDrained(self, self.op.remote_node)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the requested modifications are valid and computes the
    new parameter values, but does not apply any changes yet.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode = instance.primary_node

    self.warn = []

    if (self.op.pnode is not None and self.op.pnode != pnode and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
                                   errors.ECODE_STATE)

    assert pnode in self.owned_locks(locking.LEVEL_NODE)
    nodelist = list(instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode)
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    if instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {},
                      self._VerifyDiskModification)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      self._VerifyDiskModification)

    # Prepare disk/NIC modifications
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # Check the validity of the `provider' parameter
    if instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

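    # The blocks below validate the requested OS change and merge the new
    # hypervisor, backend and OS parameters with the instance's current
    # values (GetUpdatedParams), checking the filled results against the
    # cluster defaults; nothing is written back until Exec.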
    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
                                              instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
    be_old = cluster.FillBE(instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
                                max_requested_cpu + 1, instance.hypervisor)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
      CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         [instance.hypervisor], False)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" % pnode)
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" % node,
                     prereq=True, ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" % node,
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" % node,
                                       errors.ECODE_STATE)

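    # Runtime memory can only be ballooned on a running instance; unless
    # --force is given the new value must stay within the proposed
    # minmem/maxmem range, and growing it requires enough free memory on the
    # primary node.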
    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node)
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have between %d and"
                                   " %d MB of memory unless --force is"
                                   " given" %
                                   (instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(self, instance.primary_node,
                            "ballooning memory for instance %s" %
                            instance.name, delta, instance.hypervisor)

    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

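  # _ConvertPlainToDrbd turns each existing LV into the data device of a new
  # DRBD8 disk: it creates the missing meta/data volumes, renames the
  # original LVs to the names expected by the new disks (renaming them back
  # on failure), brings up the DRBD devices and then waits for the mirror to
  # sync.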
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    assert instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     instance.name, pnode, [snode],
                                     disk_info, None, None, 0, feedback_fn,
                                     self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
    s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
    info = GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode, instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
          f_create = node == pnode
          CreateSingleBlockDev(self, node, instance, disk, info, f_create,
                               excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

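  # _ConvertDrbdToPlain is the reverse operation: it keeps each DRBD disk's
  # local data LV as the new plain disk, returns the DRBD TCP port to the
  # pool, updates the configuration and then removes the now-unused volumes
  # on the secondary node and the meta volumes on the primary node.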
  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance

    assert len(instance.secondary_nodes) == 1
    assert instance.disk_template == constants.DT_DRBD8

    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
    new_disks = [d.children[0] for d in instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, instance.disks)
    self.cfg.Update(instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)

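  # The three methods below are the per-disk callbacks handed to
  # _ApplyContainerMods from Exec; the add and modify callbacks return
  # (name, value) change descriptions that end up in the opcode result.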
  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    instance = self.instance

    # add a new disk
    if instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, instance.disk_template, instance.name,
                           instance.primary_node, instance.secondary_nodes,
                           [params], file_path, file_driver, idx,
                           self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  def _ModifyDisk(self, idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    if constants.IDISK_MODE in params:
      disk.mode = params.get(constants.IDISK_MODE)
      changes.append(("disk.mode/%d" % idx, disk.mode))

    if constants.IDISK_NAME in params:
      disk.name = params.get(constants.IDISK_NAME)
      changes.append(("disk.name/%d" % idx, disk.name))

    # Modify arbitrary params in case instance template is ext
    for key, value in params.iteritems():
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
          self.instance.disk_template == constants.DT_EXT):
        # stolen from GetUpdatedParams: default means reset/delete
        if value.lower() == constants.VALUE_DEFAULT:
          try:
            del disk.params[key]
          except KeyError:
            pass
        else:
          disk.params[key] = value
        changes.append(("disk.params:%s/%d" % (key, idx), value))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
      self.cfg.SetDiskID(disk, node)
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx, node, msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

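  # The NIC callbacks below are invoked from CheckPrereq to pre-compute
  # self._new_nics and the change descriptions used by the hooks; they rely
  # on the filled parameters that _PrepareNicModification stored on the
  # 'private' object.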
  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

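  # Exec applies the changes in a fixed order: primary node, runtime memory,
  # disk modifications, disk template conversion (with all node locks held),
  # NIC changes, hypervisor/backend/OS parameters and finally the offline
  # state, followed by a single cfg.Update of the instance.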
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []
    instance = self.instance

    # New primary node
    if self.op.pnode:
      instance.primary_node = self.op.pnode

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
                                                     instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(instance.all_nodes)
        if self.op.remote_node:
          check_nodes.add(self.op.remote_node)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

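  # DeclareLocks follows the usual optimistic locking pattern: node groups
  # are looked up via the instance's nodes before those nodes are locked, so
  # CheckPrereq has to verify afterwards that the group set is still correct.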
  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_name
                        for group in lock_groups
                        for node_name in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

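  # CheckPrereq re-validates the optimistically taken group locks (the
  # instance's node groups may have changed in the meantime) and computes
  # self.target_uuids, refusing groups that the instance already uses.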
  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)