lib/cmdlib/instance.py @ 87e25be1

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import itertools
import logging
import operator
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import qlang
from ganeti import rpc
from ganeti import utils
from ganeti import query

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, _QueryBase, \
  ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, _CheckNodeOnline, \
  _ShareAll, _GetDefaultIAllocator, _CheckInstanceNodeGroups, \
  _LoadNodeEvacResult, _CheckIAllocatorOrNode, _CheckParamsNotGlobal, \
  _IsExclusiveStorageEnabledNode, _CheckHVParams, _CheckOSParams, \
  _GetWantedInstances, _CheckInstancesNodeGroups, _AnnotateDiskParams, \
  _GetUpdatedParams, _ExpandInstanceName, _ComputeIPolicySpecViolation, \
  _CheckInstanceState, _ExpandNodeName
from ganeti.cmdlib.instance_storage import _CreateDisks, \
  _CheckNodesFreeDiskPerVG, _WipeDisks, _WaitForSync, \
  _IsExclusiveStorageEnabledNodeName, _CreateSingleBlockDev, _ComputeDisks, \
  _CheckRADOSFreeSpace, _ComputeDiskSizePerVG, _GenerateDiskTemplate, \
  _CreateBlockDev, _StartInstanceDisks, _ShutdownInstanceDisks, \
  _AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
  _GetClusterDomainSecret, _BuildInstanceHookEnv, _NICListToTuple, \
  _NICToTuple, _CheckNodeNotDrained, _RemoveInstance, _CopyLockList, \
  _ReleaseLocks, _CheckNodeVmCapable, _CheckTargetNodeIPolicy, \
  _GetInstanceInfoText, _RemoveDisks, _CheckNodeFreeMemory, \
  _CheckInstanceBridgesExist, _CheckNicsBridgesExist

import ganeti.masterd.instance


#: Type description for changes as returned by L{ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
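
# Illustrative example (added note, not part of the upstream module): a value
# accepted by the check above is either None or a list of two-element entries
# whose first item is a non-empty description string, e.g.
#   [("add", nic_object), ("modify", "link changed")]
# The second item of each pair may be of any type.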


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
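
# Illustrative example (an assumption, not part of the upstream module): if
# "inst1" resolves to "inst1.example.com" the prefix check passes and the
# resolved hostname object is returned, whereas a resolution to
# "other.example.com" raises OpPrereqError with ECODE_INVAL.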


def _CheckOpportunisticLocking(op):
  """Generate an error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)
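
# Illustrative sketch (an assumption, not part of the upstream module): with
# beparams containing BE_VCPUS=2, BE_MAXMEM=2048 and BE_SPINDLE_USE=1, the
# request is built with vcpus=2, memory=2048 and spindle_use=1, while disks
# and nics are passed through exactly as computed earlier.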


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
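
# Illustrative example (an assumption, not part of the upstream module): an
# opcode beparams value of "auto" (constants.VALUE_AUTO) is replaced by the
# cluster default before the dict is upgraded, type-checked and merged into
# the complete set returned by cluster.SimpleFillBE().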


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
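
# Illustrative example (an assumption, not part of the upstream module): a NIC
# spec of {"ip": "pool", "network": "net1"} defers the actual address to the
# IP pool handling in CheckPrereq, while {"mode": "routed"} without an IP
# fails here because routed NICs require an IP address.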


def _CheckForConflictingIp(lu, ip, node):
  """Raise an error in case of a conflicting IP address.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=_ComputeIPolicySpecViolation):
  """Compute if the instance spec meets the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{_ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
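
# Illustrative example (an assumption, not part of the upstream module): an
# instance_spec such as
#   {ISPEC_MEM_SIZE: 2048, ISPEC_CPU_COUNT: 2, ISPEC_DISK_COUNT: 1,
#    ISPEC_DISK_SIZE: [10240], ISPEC_NIC_COUNT: 1, ISPEC_SPINDLE_USE: 1}
# is unpacked into the positional arguments of _ComputeIPolicySpecViolation,
# which returns a (possibly empty) list of violated policy parameters.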


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node does not support the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    _CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = _GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      _CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      self.op.snode = ial.result[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=_NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
              d[constants.IDISK_MODE]) for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    _CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if (int(ei_version) != constants.EXPORT_VERSION):
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (i.e. override) some instance
    parameters, try to use them from the export information, if it
    declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value
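
  # Illustrative sketch (an assumption, not part of the upstream module): the
  # export info parsed above is an INI-style file along the lines of
  #   [export]
  #   os = debootstrap+default
  #   [instance]
  #   disk_template = plain
  #   disk0_size = 10240
  #   nic0_mac = aa:00:00:fa:3a:3f
  # and only options missing from the opcode are filled in from it.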

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                          "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = _ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    _ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node %s's nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      _CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      nodes = [pnode]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(snode)
      has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        raise errors.OpPrereqError("Disk template %s not supported with"
                                   " exclusive storage" % self.op.disk_template,
                                   errors.ECODE_STATE)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        _CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      else:
        # Check lv size requirements, if not adopting
        req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
          # to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }
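
    # Illustrative note (an assumption, not part of the upstream module): the
    # disk sizes collected above are in mebibytes, mirroring the IDISK_SIZE
    # values of self.disks; the resulting spec is matched against the node
    # group's instance policy right below.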

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MAXMEM],
                           self.op.hypervisor)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we have a chicken-and-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  self.instance_file_storage_dir,
                                  self.op.file_driver,
                                  0,
                                  feedback_fn,
                                  self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        _CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      _ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        _WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # Release all node resource locks
    _ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks' IDs to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, pnode_name)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             instance, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
1330
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
1331
                                          self.op.debug_level)
1332
          if pause_sync:
1333
            feedback_fn("* resuming disk sync")
1334
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
1335
                                                              (iobj.disks,
1336
                                                               iobj), False)
1337
            for idx, success in enumerate(result.payload):
1338
              if not success:
1339
                logging.warn("resume-sync of instance %s for disk %d failed",
1340
                             instance, idx)
1341

    
1342
          os_add_result.Raise("Could not add os for instance %s"
1343
                              " on node %s" % (instance, pnode_name))
1344

    
1345
      else:
1346
        if self.op.mode == constants.INSTANCE_IMPORT:
1347
          feedback_fn("* running the instance OS import scripts...")
1348

    
1349
          transfers = []
1350

    
1351
          for idx, image in enumerate(self.src_images):
1352
            if not image:
1353
              continue
1354

    
1355
            # FIXME: pass debug option from opcode to backend
1356
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1357
                                               constants.IEIO_FILE, (image, ),
1358
                                               constants.IEIO_SCRIPT,
1359
                                               (iobj.disks[idx], idx),
1360
                                               None)
1361
            transfers.append(dt)
1362

    
1363
          import_result = \
1364
            masterd.instance.TransferInstanceData(self, feedback_fn,
1365
                                                  self.op.src_node, pnode_name,
1366
                                                  self.pnode.secondary_ip,
1367
                                                  iobj, transfers)
1368
          if not compat.all(import_result):
1369
            self.LogWarning("Some disks for instance %s on node %s were not"
1370
                            " imported successfully" % (instance, pnode_name))
1371

    
1372
          rename_from = self._old_instance_name
1373

    
1374
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1375
          feedback_fn("* preparing remote import...")
1376
          # The source cluster will stop the instance before attempting to make
1377
          # a connection. In some cases stopping an instance can take a long
1378
          # time, hence the shutdown timeout is added to the connection
1379
          # timeout.
1380
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1381
                             self.op.source_shutdown_timeout)
1382
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1383

    
1384
          assert iobj.primary_node == self.pnode.name
1385
          disk_results = \
1386
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1387
                                          self.source_x509_ca,
1388
                                          self._cds, timeouts)
1389
          if not compat.all(disk_results):
1390
            # TODO: Should the instance still be started, even if some disks
1391
            # failed to import (valid for local imports, too)?
1392
            self.LogWarning("Some disks for instance %s on node %s were not"
1393
                            " imported successfully" % (instance, pnode_name))
1394

    
1395
          rename_from = self.source_instance_name
1396

    
1397
        else:
1398
          # also checked in the prereq part
1399
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1400
                                       % self.op.mode)
1401

    
1402
        # Run rename script on newly imported instance
1403
        assert iobj.name == instance
1404
        feedback_fn("Running rename script for %s" % instance)
1405
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
1406
                                                   rename_from,
1407
                                                   self.op.debug_level)
1408
        if result.fail_msg:
1409
          self.LogWarning("Failed to run rename script for %s on node"
1410
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
1411

    
1412
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1413

    
1414
    if self.op.start:
1415
      iobj.admin_state = constants.ADMINST_UP
1416
      self.cfg.Update(iobj, feedback_fn)
1417
      logging.info("Starting instance %s on node %s", instance, pnode_name)
1418
      feedback_fn("* starting instance...")
1419
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
1420
                                            False, self.op.reason)
1421
      result.Raise("Could not start instance")
1422

    
1423
    return list(iobj.all_nodes)
1424

    
1425

    
1426
class LUInstanceRename(LogicalUnit):
1427
  """Rename an instance.
1428

1429
  """
1430
  HPATH = "instance-rename"
1431
  HTYPE = constants.HTYPE_INSTANCE
1432

    
1433
  def CheckArguments(self):
1434
    """Check arguments.
1435

1436
    """
1437
    if self.op.ip_check and not self.op.name_check:
1438
      # TODO: make the ip check more flexible and not depend on the name check
1439
      raise errors.OpPrereqError("IP address check requires a name check",
1440
                                 errors.ECODE_INVAL)
1441

    
1442
  def BuildHooksEnv(self):
1443
    """Build hooks env.
1444

1445
    This runs on master, primary and secondary nodes of the instance.
1446

1447
    """
1448
    env = _BuildInstanceHookEnvByObject(self, self.instance)
1449
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1450
    return env
1451

    
1452
  def BuildHooksNodes(self):
1453
    """Build hooks nodes.
1454

1455
    """
1456
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1457
    return (nl, nl)
1458

    
1459
  def CheckPrereq(self):
1460
    """Check prerequisites.
1461

1462
    This checks that the instance is in the cluster and is not running.
1463

1464
    """
1465
    self.op.instance_name = _ExpandInstanceName(self.cfg,
1466
                                                self.op.instance_name)
1467
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1468
    assert instance is not None
1469
    _CheckNodeOnline(self, instance.primary_node)
1470
    _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1471
                        msg="cannot rename")
1472
    self.instance = instance
1473

    
1474
    new_name = self.op.new_name
1475
    if self.op.name_check:
1476
      hostname = _CheckHostnameSane(self, new_name)
1477
      new_name = self.op.new_name = hostname.name
1478
      if (self.op.ip_check and
1479
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1480
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1481
                                   (hostname.ip, new_name),
1482
                                   errors.ECODE_NOTUNIQUE)
1483

    
1484
    instance_list = self.cfg.GetInstanceList()
1485
    if new_name in instance_list and new_name != instance.name:
1486
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1487
                                 new_name, errors.ECODE_EXISTS)
1488

    
1489
  def Exec(self, feedback_fn):
1490
    """Rename the instance.
1491

1492
    """
1493
    inst = self.instance
1494
    old_name = inst.name
1495

    
1496
    rename_file_storage = False
1497
    if (inst.disk_template in constants.DTS_FILEBASED and
1498
        self.op.new_name != inst.name):
1499
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1500
      rename_file_storage = True
1501

    
1502
    self.cfg.RenameInstance(inst.name, self.op.new_name)
1503
    # Change the instance lock. This is definitely safe while we hold the BGL.
1504
    # Otherwise the new lock would have to be added in acquired mode.
1505
    assert self.REQ_BGL
1506
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1507
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1508
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1509

    
1510
    # re-read the instance from the configuration after rename
1511
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
1512

    
1513
    if rename_file_storage:
1514
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
1515
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
1516
                                                     old_file_storage_dir,
1517
                                                     new_file_storage_dir)
1518
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1519
                   " (but the instance has been renamed in Ganeti)" %
1520
                   (inst.primary_node, old_file_storage_dir,
1521
                    new_file_storage_dir))
1522

    
1523
    _StartInstanceDisks(self, inst, None)
1524
    # update info on disks
1525
    info = _GetInstanceInfoText(inst)
1526
    for (idx, disk) in enumerate(inst.disks):
1527
      for node in inst.all_nodes:
1528
        self.cfg.SetDiskID(disk, node)
1529
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
1530
        if result.fail_msg:
1531
          self.LogWarning("Error setting info on node %s for disk %s: %s",
1532
                          node, idx, result.fail_msg)
1533
    try:
1534
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
1535
                                                 old_name, self.op.debug_level)
1536
      msg = result.fail_msg
1537
      if msg:
1538
        msg = ("Could not run OS rename script for instance %s on node %s"
1539
               " (but the instance has been renamed in Ganeti): %s" %
1540
               (inst.name, inst.primary_node, msg))
1541
        self.LogWarning(msg)
1542
    finally:
1543
      _ShutdownInstanceDisks(self, inst)
1544

    
1545
    return inst.name
1546

    
1547

    
1548
class LUInstanceRemove(LogicalUnit):
1549
  """Remove an instance.
1550

1551
  """
1552
  HPATH = "instance-remove"
1553
  HTYPE = constants.HTYPE_INSTANCE
1554
  REQ_BGL = False
1555

    
1556
  def ExpandNames(self):
1557
    self._ExpandAndLockInstance()
1558
    self.needed_locks[locking.LEVEL_NODE] = []
1559
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1560
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1561

    
1562
  def DeclareLocks(self, level):
1563
    if level == locking.LEVEL_NODE:
1564
      self._LockInstancesNodes()
1565
    elif level == locking.LEVEL_NODE_RES:
1566
      # Copy node locks
1567
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1568
        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1569

    
1570
  def BuildHooksEnv(self):
1571
    """Build hooks env.
1572

1573
    This runs on master, primary and secondary nodes of the instance.
1574

1575
    """
1576
    env = _BuildInstanceHookEnvByObject(self, self.instance)
1577
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1578
    return env
1579

    
1580
  def BuildHooksNodes(self):
1581
    """Build hooks nodes.
1582

1583
    """
1584
    nl = [self.cfg.GetMasterNode()]
1585
    nl_post = list(self.instance.all_nodes) + nl
1586
    return (nl, nl_post)
1587

    
1588
  def CheckPrereq(self):
1589
    """Check prerequisites.
1590

1591
    This checks that the instance is in the cluster.
1592

1593
    """
1594
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1595
    assert self.instance is not None, \
1596
      "Cannot retrieve locked instance %s" % self.op.instance_name
1597

    
1598
  def Exec(self, feedback_fn):
1599
    """Remove the instance.
1600

1601
    """
1602
    instance = self.instance
1603
    logging.info("Shutting down instance %s on node %s",
1604
                 instance.name, instance.primary_node)
1605

    
1606
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
1607
                                             self.op.shutdown_timeout,
1608
                                             self.op.reason)
1609
    msg = result.fail_msg
1610
    if msg:
1611
      if self.op.ignore_failures:
1612
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
1613
      else:
1614
        raise errors.OpExecError("Could not shutdown instance %s on"
1615
                                 " node %s: %s" %
1616
                                 (instance.name, instance.primary_node, msg))
1617

    
1618
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1619
            self.owned_locks(locking.LEVEL_NODE_RES))
1620
    assert not (set(instance.all_nodes) -
1621
                self.owned_locks(locking.LEVEL_NODE)), \
1622
      "Not owning correct locks"
1623

    
1624
    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
1625

    
1626

    
1627
class LUInstanceMove(LogicalUnit):
1628
  """Move an instance by data-copying.
1629

1630
  """
1631
  HPATH = "instance-move"
1632
  HTYPE = constants.HTYPE_INSTANCE
1633
  REQ_BGL = False
1634

    
1635
  def ExpandNames(self):
1636
    self._ExpandAndLockInstance()
1637
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
1638
    self.op.target_node = target_node
1639
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
1640
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1641
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1642

    
1643
  def DeclareLocks(self, level):
1644
    if level == locking.LEVEL_NODE:
1645
      self._LockInstancesNodes(primary_only=True)
1646
    elif level == locking.LEVEL_NODE_RES:
1647
      # Copy node locks
1648
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1649
        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1650

    
1651
  def BuildHooksEnv(self):
1652
    """Build hooks env.
1653

1654
    This runs on master, primary and secondary nodes of the instance.
1655

1656
    """
1657
    env = {
1658
      "TARGET_NODE": self.op.target_node,
1659
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1660
      }
1661
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
1662
    return env
1663

    
1664
  def BuildHooksNodes(self):
1665
    """Build hooks nodes.
1666

1667
    """
1668
    nl = [
1669
      self.cfg.GetMasterNode(),
1670
      self.instance.primary_node,
1671
      self.op.target_node,
1672
      ]
1673
    return (nl, nl)
1674

    
1675
  def CheckPrereq(self):
1676
    """Check prerequisites.
1677

1678
    This checks that the instance is in the cluster.
1679

1680
    """
1681
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1682
    assert self.instance is not None, \
1683
      "Cannot retrieve locked instance %s" % self.op.instance_name
1684

    
1685
    if instance.disk_template not in constants.DTS_COPYABLE:
1686
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1687
                                 instance.disk_template, errors.ECODE_STATE)
1688

    
1689
    node = self.cfg.GetNodeInfo(self.op.target_node)
1690
    assert node is not None, \
1691
      "Cannot retrieve locked node %s" % self.op.target_node
1692

    
1693
    self.target_node = target_node = node.name
1694

    
1695
    if target_node == instance.primary_node:
1696
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1697
                                 (instance.name, target_node),
1698
                                 errors.ECODE_STATE)
1699

    
1700
    bep = self.cfg.GetClusterInfo().FillBE(instance)
1701

    
1702
    for idx, dsk in enumerate(instance.disks):
1703
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1704
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1705
                                   " cannot copy" % idx, errors.ECODE_STATE)
1706

    
1707
    _CheckNodeOnline(self, target_node)
1708
    _CheckNodeNotDrained(self, target_node)
1709
    _CheckNodeVmCapable(self, target_node)
1710
    cluster = self.cfg.GetClusterInfo()
1711
    group_info = self.cfg.GetNodeGroup(node.group)
1712
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1713
    _CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
1714
                            ignore=self.op.ignore_ipolicy)
1715

    
1716
    if instance.admin_state == constants.ADMINST_UP:
1717
      # check memory requirements on the secondary node
1718
      _CheckNodeFreeMemory(self, target_node,
1719
                           "failing over instance %s" %
1720
                           instance.name, bep[constants.BE_MAXMEM],
1721
                           instance.hypervisor)
1722
    else:
1723
      self.LogInfo("Not checking memory on the secondary node as"
1724
                   " instance will not be started")
1725

    
1726
    # check bridge existance
1727
    _CheckInstanceBridgesExist(self, instance, node=target_node)
1728

    
1729
  def Exec(self, feedback_fn):
1730
    """Move an instance.
1731

1732
    The move is done by shutting it down on its present node, copying
1733
    the data over (slow) and starting it on the new node.
1734

1735
    """
1736
    instance = self.instance
1737

    
1738
    source_node = instance.primary_node
1739
    target_node = self.target_node
1740

    
1741
    self.LogInfo("Shutting down instance %s on source node %s",
1742
                 instance.name, source_node)
1743

    
1744
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1745
            self.owned_locks(locking.LEVEL_NODE_RES))
1746

    
1747
    result = self.rpc.call_instance_shutdown(source_node, instance,
1748
                                             self.op.shutdown_timeout,
1749
                                             self.op.reason)
1750
    msg = result.fail_msg
1751
    if msg:
1752
      if self.op.ignore_consistency:
1753
        self.LogWarning("Could not shutdown instance %s on node %s."
1754
                        " Proceeding anyway. Please make sure node"
1755
                        " %s is down. Error details: %s",
1756
                        instance.name, source_node, source_node, msg)
1757
      else:
1758
        raise errors.OpExecError("Could not shutdown instance %s on"
1759
                                 " node %s: %s" %
1760
                                 (instance.name, source_node, msg))
1761

    
1762
    # create the target disks
1763
    try:
1764
      _CreateDisks(self, instance, target_node=target_node)
1765
    except errors.OpExecError:
1766
      self.LogWarning("Device creation failed")
1767
      self.cfg.ReleaseDRBDMinors(instance.name)
1768
      raise
1769

    
1770
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1771

    
1772
    errs = []
1773
    # activate, get path, copy the data over
1774
    for idx, disk in enumerate(instance.disks):
1775
      self.LogInfo("Copying data for disk %d", idx)
1776
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
1777
                                               instance.name, True, idx)
1778
      if result.fail_msg:
1779
        self.LogWarning("Can't assemble newly created disk %d: %s",
1780
                        idx, result.fail_msg)
1781
        errs.append(result.fail_msg)
1782
        break
1783
      dev_path = result.payload
1784
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
1785
                                             target_node, dev_path,
1786
                                             cluster_name)
1787
      if result.fail_msg:
1788
        self.LogWarning("Can't copy data over for disk %d: %s",
1789
                        idx, result.fail_msg)
1790
        errs.append(result.fail_msg)
1791
        break
1792

    
1793
    if errs:
1794
      self.LogWarning("Some disks failed to copy, aborting")
1795
      try:
1796
        _RemoveDisks(self, instance, target_node=target_node)
1797
      finally:
1798
        self.cfg.ReleaseDRBDMinors(instance.name)
1799
        raise errors.OpExecError("Errors during disk copy: %s" %
1800
                                 (",".join(errs),))
1801

    
1802
    instance.primary_node = target_node
1803
    self.cfg.Update(instance, feedback_fn)
1804

    
1805
    self.LogInfo("Removing the disks on the original node")
1806
    _RemoveDisks(self, instance, target_node=source_node)
1807

    
1808
    # Only start the instance if it's marked as up
1809
    if instance.admin_state == constants.ADMINST_UP:
1810
      self.LogInfo("Starting instance %s on node %s",
1811
                   instance.name, target_node)
1812

    
1813
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
1814
                                           ignore_secondaries=True)
1815
      if not disks_ok:
1816
        _ShutdownInstanceDisks(self, instance)
1817
        raise errors.OpExecError("Can't activate the instance's disks")
1818

    
1819
      result = self.rpc.call_instance_start(target_node,
1820
                                            (instance, None, None), False,
1821
                                            self.op.reason)
1822
      msg = result.fail_msg
1823
      if msg:
1824
        _ShutdownInstanceDisks(self, instance)
1825
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1826
                                 (instance.name, target_node, msg))
1827

    
1828

    
1829
def _GetInstanceConsole(cluster, instance):
1830
  """Returns console information for an instance.
1831

1832
  @type cluster: L{objects.Cluster}
1833
  @type instance: L{objects.Instance}
1834
  @rtype: dict
1835

1836
  """
1837
  hyper = hypervisor.GetHypervisorClass(instance.hypervisor)
1838
  # beparams and hvparams are passed separately, to avoid editing the
1839
  # instance and then saving the defaults in the instance itself.
1840
  hvparams = cluster.FillHV(instance)
1841
  beparams = cluster.FillBE(instance)
1842
  console = hyper.GetInstanceConsole(instance, hvparams, beparams)
1843

    
1844
  assert console.instance == instance.name
1845
  assert console.Validate()
1846

    
1847
  return console.ToDict()
1848

    
1849

    
1850
class _InstanceQuery(_QueryBase):
1851
  FIELDS = query.INSTANCE_FIELDS
1852

    
1853
  def ExpandNames(self, lu):
1854
    lu.needed_locks = {}
1855
    lu.share_locks = _ShareAll()
1856

    
1857
    if self.names:
1858
      self.wanted = _GetWantedInstances(lu, self.names)
1859
    else:
1860
      self.wanted = locking.ALL_SET
1861

    
1862
    self.do_locking = (self.use_locking and
1863
                       query.IQ_LIVE in self.requested_data)
1864
    if self.do_locking:
1865
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
1866
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
1867
      lu.needed_locks[locking.LEVEL_NODE] = []
1868
      lu.needed_locks[locking.LEVEL_NETWORK] = []
1869
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1870

    
1871
    self.do_grouplocks = (self.do_locking and
1872
                          query.IQ_NODES in self.requested_data)
1873

    
1874
  def DeclareLocks(self, lu, level):
1875
    if self.do_locking:
1876
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
1877
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
1878

    
1879
        # Lock all groups used by instances optimistically; this requires going
1880
        # via the node before it's locked, requiring verification later on
1881
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
1882
          set(group_uuid
1883
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
1884
              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
1885
      elif level == locking.LEVEL_NODE:
1886
        lu._LockInstancesNodes() # pylint: disable=W0212
1887

    
1888
      elif level == locking.LEVEL_NETWORK:
1889
        lu.needed_locks[locking.LEVEL_NETWORK] = \
1890
          frozenset(net_uuid
1891
                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
1892
                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))
1893

    
1894
  @staticmethod
1895
  def _CheckGroupLocks(lu):
1896
    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
1897
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
1898

    
1899
    # Check if node groups for locked instances are still correct
1900
    for instance_name in owned_instances:
1901
      _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
1902

    
1903
  def _GetQueryData(self, lu):
1904
    """Computes the list of instances and their attributes.
1905

1906
    """
1907
    if self.do_grouplocks:
1908
      self._CheckGroupLocks(lu)
1909

    
1910
    cluster = lu.cfg.GetClusterInfo()
1911
    all_info = lu.cfg.GetAllInstancesInfo()
1912

    
1913
    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
1914

    
1915
    instance_list = [all_info[name] for name in instance_names]
1916
    nodes = frozenset(itertools.chain(*(inst.all_nodes
1917
                                        for inst in instance_list)))
1918
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
1919
    bad_nodes = []
1920
    offline_nodes = []
1921
    wrongnode_inst = set()
1922

    
1923
    # Gather data as requested
1924
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
1925
      live_data = {}
1926
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
1927
      for name in nodes:
1928
        result = node_data[name]
1929
        if result.offline:
1930
          # offline nodes will be in both lists
1931
          assert result.fail_msg
1932
          offline_nodes.append(name)
1933
        if result.fail_msg:
1934
          bad_nodes.append(name)
1935
        elif result.payload:
1936
          for inst in result.payload:
1937
            if inst in all_info:
1938
              if all_info[inst].primary_node == name:
1939
                live_data.update(result.payload)
1940
              else:
1941
                wrongnode_inst.add(inst)
1942
            else:
1943
              # orphan instance; we don't list it here as we don't
1944
              # handle this case yet in the output of instance listing
1945
              logging.warning("Orphan instance '%s' found on node %s",
1946
                              inst, name)
1947
              # else no instance is alive
1948
    else:
1949
      live_data = {}
1950

    
1951
    if query.IQ_DISKUSAGE in self.requested_data:
1952
      gmi = ganeti.masterd.instance
1953
      disk_usage = dict((inst.name,
1954
                         gmi.ComputeDiskSize(inst.disk_template,
1955
                                             [{constants.IDISK_SIZE: disk.size}
1956
                                              for disk in inst.disks]))
1957
                        for inst in instance_list)
1958
    else:
1959
      disk_usage = None
1960

    
1961
    if query.IQ_CONSOLE in self.requested_data:
1962
      consinfo = {}
1963
      for inst in instance_list:
1964
        if inst.name in live_data:
1965
          # Instance is running
1966
          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
1967
        else:
1968
          consinfo[inst.name] = None
1969
      assert set(consinfo.keys()) == set(instance_names)
1970
    else:
1971
      consinfo = None
1972

    
1973
    if query.IQ_NODES in self.requested_data:
1974
      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
1975
                                            instance_list)))
1976
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
1977
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
1978
                    for uuid in set(map(operator.attrgetter("group"),
1979
                                        nodes.values())))
1980
    else:
1981
      nodes = None
1982
      groups = None
1983

    
1984
    if query.IQ_NETWORKS in self.requested_data:
1985
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
1986
                                    for i in instance_list))
1987
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
1988
    else:
1989
      networks = None
1990

    
1991
    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
1992
                                   disk_usage, offline_nodes, bad_nodes,
1993
                                   live_data, wrongnode_inst, consinfo,
1994
                                   nodes, groups, networks)
1995

    
1996

    
1997
class LUInstanceQuery(NoHooksLU):
1998
  """Logical unit for querying instances.
1999

2000
  """
2001
  # pylint: disable=W0142
2002
  REQ_BGL = False
2003

    
2004
  def CheckArguments(self):
2005
    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
2006
                             self.op.output_fields, self.op.use_locking)
2007

    
2008
  def ExpandNames(self):
2009
    self.iq.ExpandNames(self)
2010

    
2011
  def DeclareLocks(self, level):
2012
    self.iq.DeclareLocks(self, level)
2013

    
2014
  def Exec(self, feedback_fn):
2015
    return self.iq.OldStyleQuery(self)
2016

    
2017

    
2018
class LUInstanceQueryData(NoHooksLU):
2019
  """Query runtime instance data.
2020

2021
  """
2022
  REQ_BGL = False
2023

    
2024
  def ExpandNames(self):
2025
    self.needed_locks = {}
2026

    
2027
    # Use locking if requested or when non-static information is wanted
2028
    if not (self.op.static or self.op.use_locking):
2029
      self.LogWarning("Non-static data requested, locks need to be acquired")
2030
      self.op.use_locking = True
2031

    
2032
    if self.op.instances or not self.op.use_locking:
2033
      # Expand instance names right here
2034
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
2035
    else:
2036
      # Will use acquired locks
2037
      self.wanted_names = None
2038

    
2039
    if self.op.use_locking:
2040
      self.share_locks = _ShareAll()
2041

    
2042
      if self.wanted_names is None:
2043
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
2044
      else:
2045
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
2046

    
2047
      self.needed_locks[locking.LEVEL_NODEGROUP] = []
2048
      self.needed_locks[locking.LEVEL_NODE] = []
2049
      self.needed_locks[locking.LEVEL_NETWORK] = []
2050
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2051

    
2052
  def DeclareLocks(self, level):
2053
    if self.op.use_locking:
2054
      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
2055
      if level == locking.LEVEL_NODEGROUP:
2056

    
2057
        # Lock all groups used by instances optimistically; this requires going
2058
        # via the node before it's locked, requiring verification later on
2059
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
2060
          frozenset(group_uuid
2061
                    for instance_name in owned_instances
2062
                    for group_uuid in
2063
                    self.cfg.GetInstanceNodeGroups(instance_name))
2064

    
2065
      elif level == locking.LEVEL_NODE:
2066
        self._LockInstancesNodes()
2067

    
2068
      elif level == locking.LEVEL_NETWORK:
2069
        self.needed_locks[locking.LEVEL_NETWORK] = \
2070
          frozenset(net_uuid
2071
                    for instance_name in owned_instances
2072
                    for net_uuid in
2073
                    self.cfg.GetInstanceNetworks(instance_name))
2074

    
2075
  def CheckPrereq(self):
2076
    """Check prerequisites.
2077

2078
    This only checks the optional instance list against the existing names.
2079

2080
    """
2081
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
2082
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
2083
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
2084
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
2085

    
2086
    if self.wanted_names is None:
2087
      assert self.op.use_locking, "Locking was not used"
2088
      self.wanted_names = owned_instances
2089

    
2090
    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
2091

    
2092
    if self.op.use_locking:
2093
      _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
2094
                                None)
2095
    else:
2096
      assert not (owned_instances or owned_groups or
2097
                  owned_nodes or owned_networks)
2098

    
2099
    self.wanted_instances = instances.values()
2100

    
2101
  def _ComputeBlockdevStatus(self, node, instance, dev):
2102
    """Returns the status of a block device
2103

2104
    """
2105
    if self.op.static or not node:
2106
      return None
2107

    
2108
    self.cfg.SetDiskID(dev, node)
2109

    
2110
    result = self.rpc.call_blockdev_find(node, dev)
2111
    if result.offline:
2112
      return None
2113

    
2114
    result.Raise("Can't compute disk status for %s" % instance.name)
2115

    
2116
    status = result.payload
2117
    if status is None:
2118
      return None
2119

    
2120
    return (status.dev_path, status.major, status.minor,
2121
            status.sync_percent, status.estimated_time,
2122
            status.is_degraded, status.ldisk_status)
2123

    
2124
  def _ComputeDiskStatus(self, instance, snode, dev):
2125
    """Compute block device status.
2126

2127
    """
2128
    (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
2129

    
2130
    return self._ComputeDiskStatusInner(instance, snode, anno_dev)
2131

    
2132
  def _ComputeDiskStatusInner(self, instance, snode, dev):
2133
    """Compute block device status.
2134

2135
    @attention: The device has to be annotated already.
2136

2137
    """
2138
    if dev.dev_type in constants.LDS_DRBD:
2139
      # we change the snode then (otherwise we use the one passed in)
2140
      if dev.logical_id[0] == instance.primary_node:
2141
        snode = dev.logical_id[1]
2142
      else:
2143
        snode = dev.logical_id[0]
2144

    
2145
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
2146
                                              instance, dev)
2147
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
2148

    
2149
    if dev.children:
2150
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
2151
                                        instance, snode),
2152
                         dev.children)
2153
    else:
2154
      dev_children = []
2155

    
2156
    return {
2157
      "iv_name": dev.iv_name,
2158
      "dev_type": dev.dev_type,
2159
      "logical_id": dev.logical_id,
2160
      "physical_id": dev.physical_id,
2161
      "pstatus": dev_pstatus,
2162
      "sstatus": dev_sstatus,
2163
      "children": dev_children,
2164
      "mode": dev.mode,
2165
      "size": dev.size,
2166
      "name": dev.name,
2167
      "uuid": dev.uuid,
2168
      }
2169

    
2170
  def Exec(self, feedback_fn):
2171
    """Gather and return data"""
2172
    result = {}
2173

    
2174
    cluster = self.cfg.GetClusterInfo()
2175

    
2176
    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
2177
    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
2178

    
2179
    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
2180
                                                 for node in nodes.values()))
2181

    
2182
    group2name_fn = lambda uuid: groups[uuid].name
2183
    for instance in self.wanted_instances:
2184
      pnode = nodes[instance.primary_node]
2185

    
2186
      if self.op.static or pnode.offline:
2187
        remote_state = None
2188
        if pnode.offline:
2189
          self.LogWarning("Primary node %s is marked offline, returning static"
2190
                          " information only for instance %s" %
2191
                          (pnode.name, instance.name))
2192
      else:
2193
        remote_info = self.rpc.call_instance_info(instance.primary_node,
2194
                                                  instance.name,
2195
                                                  instance.hypervisor)
2196
        remote_info.Raise("Error checking node %s" % instance.primary_node)
2197
        remote_info = remote_info.payload
2198
        if remote_info and "state" in remote_info:
2199
          remote_state = "up"
2200
        else:
2201
          if instance.admin_state == constants.ADMINST_UP:
2202
            remote_state = "down"
2203
          else:
2204
            remote_state = instance.admin_state
2205

    
2206
      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
2207
                  instance.disks)
2208

    
2209
      snodes_group_uuids = [nodes[snode_name].group
2210
                            for snode_name in instance.secondary_nodes]
2211

    
2212
      result[instance.name] = {
2213
        "name": instance.name,
2214
        "config_state": instance.admin_state,
2215
        "run_state": remote_state,
2216
        "pnode": instance.primary_node,
2217
        "pnode_group_uuid": pnode.group,
2218
        "pnode_group_name": group2name_fn(pnode.group),
2219
        "snodes": instance.secondary_nodes,
2220
        "snodes_group_uuids": snodes_group_uuids,
2221
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
2222
        "os": instance.os,
2223
        # this happens to be the same format used for hooks
2224
        "nics": _NICListToTuple(self, instance.nics),
2225
        "disk_template": instance.disk_template,
2226
        "disks": disks,
2227
        "hypervisor": instance.hypervisor,
2228
        "network_port": instance.network_port,
2229
        "hv_instance": instance.hvparams,
2230
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
2231
        "be_instance": instance.beparams,
2232
        "be_actual": cluster.FillBE(instance),
2233
        "os_instance": instance.osparams,
2234
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
2235
        "serial_no": instance.serial_no,
2236
        "mtime": instance.mtime,
2237
        "ctime": instance.ctime,
2238
        "uuid": instance.uuid,
2239
        }
2240

    
2241
    return result
2242

    
2243

    
2244
class LUInstanceStartup(LogicalUnit):
2245
  """Starts an instance.
2246

2247
  """
2248
  HPATH = "instance-start"
2249
  HTYPE = constants.HTYPE_INSTANCE
2250
  REQ_BGL = False
2251

    
2252
  def CheckArguments(self):
2253
    # extra beparams
2254
    if self.op.beparams:
2255
      # fill the beparams dict
2256
      objects.UpgradeBeParams(self.op.beparams)
2257
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2258

    
2259
  def ExpandNames(self):
2260
    self._ExpandAndLockInstance()
2261
    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
2262

    
2263
  def DeclareLocks(self, level):
2264
    if level == locking.LEVEL_NODE_RES:
2265
      self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
2266

    
2267
  def BuildHooksEnv(self):
2268
    """Build hooks env.
2269

2270
    This runs on master, primary and secondary nodes of the instance.
2271

2272
    """
2273
    env = {
2274
      "FORCE": self.op.force,
2275
      }
2276

    
2277
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2278

    
2279
    return env
2280

    
2281
  def BuildHooksNodes(self):
2282
    """Build hooks nodes.
2283

2284
    """
2285
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2286
    return (nl, nl)
2287

    
2288
  def CheckPrereq(self):
2289
    """Check prerequisites.
2290

2291
    This checks that the instance is in the cluster.
2292

2293
    """
2294
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2295
    assert self.instance is not None, \
2296
      "Cannot retrieve locked instance %s" % self.op.instance_name
2297

    
2298
    # extra hvparams
2299
    if self.op.hvparams:
2300
      # check hypervisor parameter syntax (locally)
2301
      cluster = self.cfg.GetClusterInfo()
2302
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
2303
      filled_hvp = cluster.FillHV(instance)
2304
      filled_hvp.update(self.op.hvparams)
2305
      hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
2306
      hv_type.CheckParameterSyntax(filled_hvp)
2307
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
2308

    
2309
    _CheckInstanceState(self, instance, INSTANCE_ONLINE)
2310

    
2311
    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
2312

    
2313
    if self.primary_offline and self.op.ignore_offline_nodes:
2314
      self.LogWarning("Ignoring offline primary node")
2315

    
2316
      if self.op.hvparams or self.op.beparams:
2317
        self.LogWarning("Overridden parameters are ignored")
2318
    else:
2319
      _CheckNodeOnline(self, instance.primary_node)
2320

    
2321
      bep = self.cfg.GetClusterInfo().FillBE(instance)
2322
      bep.update(self.op.beparams)
2323

    
2324
      # check bridges existence
2325
      _CheckInstanceBridgesExist(self, instance)
2326

    
2327
      remote_info = self.rpc.call_instance_info(instance.primary_node,
2328
                                                instance.name,
2329
                                                instance.hypervisor)
2330
      remote_info.Raise("Error checking node %s" % instance.primary_node,
2331
                        prereq=True, ecode=errors.ECODE_ENVIRON)
2332
      if not remote_info.payload: # not running already
2333
        _CheckNodeFreeMemory(self, instance.primary_node,
2334
                             "starting instance %s" % instance.name,
2335
                             bep[constants.BE_MINMEM], instance.hypervisor)
2336

    
2337
  def Exec(self, feedback_fn):
2338
    """Start the instance.
2339

2340
    """
2341
    instance = self.instance
2342
    force = self.op.force
2343
    reason = self.op.reason
2344

    
2345
    if not self.op.no_remember:
2346
      self.cfg.MarkInstanceUp(instance.name)
2347

    
2348
    if self.primary_offline:
2349
      assert self.op.ignore_offline_nodes
2350
      self.LogInfo("Primary node offline, marked instance as started")
2351
    else:
2352
      node_current = instance.primary_node
2353

    
2354
      _StartInstanceDisks(self, instance, force)
2355

    
2356
      result = \
2357
        self.rpc.call_instance_start(node_current,
2358
                                     (instance, self.op.hvparams,
2359
                                      self.op.beparams),
2360
                                     self.op.startup_paused, reason)
2361
      msg = result.fail_msg
2362
      if msg:
2363
        _ShutdownInstanceDisks(self, instance)
2364
        raise errors.OpExecError("Could not start instance: %s" % msg)
2365

    
2366

    
2367
class LUInstanceShutdown(LogicalUnit):
2368
  """Shutdown an instance.
2369

2370
  """
2371
  HPATH = "instance-stop"
2372
  HTYPE = constants.HTYPE_INSTANCE
2373
  REQ_BGL = False
2374

    
2375
  def ExpandNames(self):
2376
    self._ExpandAndLockInstance()
2377

    
2378
  def BuildHooksEnv(self):
2379
    """Build hooks env.
2380

2381
    This runs on master, primary and secondary nodes of the instance.
2382

2383
    """
2384
    env = _BuildInstanceHookEnvByObject(self, self.instance)
2385
    env["TIMEOUT"] = self.op.timeout
2386
    return env
2387

    
2388
  def BuildHooksNodes(self):
2389
    """Build hooks nodes.
2390

2391
    """
2392
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2393
    return (nl, nl)
2394

    
2395
  def CheckPrereq(self):
2396
    """Check prerequisites.
2397

2398
    This checks that the instance is in the cluster.
2399

2400
    """
2401
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2402
    assert self.instance is not None, \
2403
      "Cannot retrieve locked instance %s" % self.op.instance_name
2404

    
2405
    if not self.op.force:
2406
      _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
2407
    else:
2408
      self.LogWarning("Ignoring offline instance check")
2409

    
2410
    self.primary_offline = \
2411
      self.cfg.GetNodeInfo(self.instance.primary_node).offline
2412

    
2413
    if self.primary_offline and self.op.ignore_offline_nodes:
2414
      self.LogWarning("Ignoring offline primary node")
2415
    else:
2416
      _CheckNodeOnline(self, self.instance.primary_node)
2417

    
2418
  def Exec(self, feedback_fn):
2419
    """Shutdown the instance.
2420

2421
    """
2422
    instance = self.instance
2423
    node_current = instance.primary_node
2424
    timeout = self.op.timeout
2425
    reason = self.op.reason
2426

    
2427
    # If the instance is offline we shouldn't mark it as down, as that
2428
    # resets the offline flag.
2429
    if not self.op.no_remember and instance.admin_state in INSTANCE_ONLINE:
2430
      self.cfg.MarkInstanceDown(instance.name)
2431

    
2432
    if self.primary_offline:
2433
      assert self.op.ignore_offline_nodes
2434
      self.LogInfo("Primary node offline, marked instance as stopped")
2435
    else:
2436
      result = self.rpc.call_instance_shutdown(node_current, instance, timeout,
2437
                                               reason)
2438
      msg = result.fail_msg
2439
      if msg:
2440
        self.LogWarning("Could not shutdown instance: %s", msg)
2441

    
2442
      _ShutdownInstanceDisks(self, instance)
2443

    
2444

    
2445
class LUInstanceReinstall(LogicalUnit):
2446
  """Reinstall an instance.
2447

2448
  """
2449
  HPATH = "instance-reinstall"
2450
  HTYPE = constants.HTYPE_INSTANCE
2451
  REQ_BGL = False
2452

    
2453
  def ExpandNames(self):
2454
    self._ExpandAndLockInstance()
2455

    
2456
  def BuildHooksEnv(self):
2457
    """Build hooks env.
2458

2459
    This runs on master, primary and secondary nodes of the instance.
2460

2461
    """
2462
    return _BuildInstanceHookEnvByObject(self, self.instance)
2463

    
2464
  def BuildHooksNodes(self):
2465
    """Build hooks nodes.
2466

2467
    """
2468
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2469
    return (nl, nl)
2470

    
2471
  def CheckPrereq(self):
2472
    """Check prerequisites.
2473

2474
    This checks that the instance is in the cluster and is not running.
2475

2476
    """
2477
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2478
    assert instance is not None, \
2479
      "Cannot retrieve locked instance %s" % self.op.instance_name
2480
    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
2481
                     " offline, cannot reinstall")
2482

    
2483
    if instance.disk_template == constants.DT_DISKLESS:
2484
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2485
                                 self.op.instance_name,
2486
                                 errors.ECODE_INVAL)
2487
    _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
2488

    
2489
    if self.op.os_type is not None:
2490
      # OS verification
2491
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
2492
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
2493
      instance_os = self.op.os_type
2494
    else:
2495
      instance_os = instance.os
2496

    
2497
    nodelist = list(instance.all_nodes)
2498

    
2499
    if self.op.osparams:
2500
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
2501
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2502
      self.os_inst = i_osdict # the new dict (without defaults)
2503
    else:
2504
      self.os_inst = None
2505

    
2506
    self.instance = instance
2507

    
2508
  def Exec(self, feedback_fn):
2509
    """Reinstall the instance.
2510

2511
    """
2512
    inst = self.instance
2513

    
2514
    if self.op.os_type is not None:
2515
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2516
      inst.os = self.op.os_type
2517
      # Write to configuration
2518
      self.cfg.Update(inst, feedback_fn)
2519

    
2520
    _StartInstanceDisks(self, inst, None)
2521
    try:
2522
      feedback_fn("Running the instance OS create scripts...")
2523
      # FIXME: pass debug option from opcode to backend
2524
      result = self.rpc.call_instance_os_add(inst.primary_node,
2525
                                             (inst, self.os_inst), True,
2526
                                             self.op.debug_level)
2527
      result.Raise("Could not install OS for instance %s on node %s" %
2528
                   (inst.name, inst.primary_node))
2529
    finally:
2530
      _ShutdownInstanceDisks(self, inst)
2531

    
2532

    
2533
class LUInstanceReboot(LogicalUnit):
2534
  """Reboot an instance.
2535

2536
  """
2537
  HPATH = "instance-reboot"
2538
  HTYPE = constants.HTYPE_INSTANCE
2539
  REQ_BGL = False
2540

    
2541
  def ExpandNames(self):
2542
    self._ExpandAndLockInstance()
2543

    
2544
  def BuildHooksEnv(self):
2545
    """Build hooks env.
2546

2547
    This runs on master, primary and secondary nodes of the instance.
2548

2549
    """
2550
    env = {
2551
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2552
      "REBOOT_TYPE": self.op.reboot_type,
2553
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
2554
      }
2555

    
2556
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2557

    
2558
    return env
2559

    
2560
  def BuildHooksNodes(self):
2561
    """Build hooks nodes.
2562

2563
    """
2564
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2565
    return (nl, nl)
2566

    
2567
  def CheckPrereq(self):
2568
    """Check prerequisites.
2569

2570
    This checks that the instance is in the cluster.
2571

2572
    """
2573
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2574
    assert self.instance is not None, \
2575
      "Cannot retrieve locked instance %s" % self.op.instance_name
2576
    _CheckInstanceState(self, instance, INSTANCE_ONLINE)
2577
    _CheckNodeOnline(self, instance.primary_node)
2578

    
2579
    # check bridges existence
2580
    _CheckInstanceBridgesExist(self, instance)
2581

    
2582
  def Exec(self, feedback_fn):
2583
    """Reboot the instance.
2584

2585
    """
2586
    instance = self.instance
2587
    ignore_secondaries = self.op.ignore_secondaries
2588
    reboot_type = self.op.reboot_type
2589
    reason = self.op.reason
2590

    
2591
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2592
                                              instance.name,
2593
                                              instance.hypervisor)
2594
    remote_info.Raise("Error checking node %s" % instance.primary_node)
2595
    instance_running = bool(remote_info.payload)
2596

    
2597
    node_current = instance.primary_node
2598

    
2599
    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                                            constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type,
                                             self.op.shutdown_timeout, reason)
      result.Raise("Could not reboot instance")
    else:
      if instance_running:
        result = self.rpc.call_instance_shutdown(node_current, instance,
                                                 self.op.shutdown_timeout,
                                                 reason)
        result.Raise("Could not shutdown instance for full reboot")
        _ShutdownInstanceDisks(self, instance)
      else:
        self.LogInfo("Instance %s was already stopped, starting now",
                     instance.name)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current,
                                            (instance, None, None), False,
                                            reason)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


class LUInstanceConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = _ShareAll()
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    node_insts.Raise("Can't get node information from %s" % node)

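    # The instance is not reported by the hypervisor: translate its configured
    # admin state into the matching operational state for the error message
    # instead of trying to attach to a console that does not exist.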
    if instance.name not in node_insts.payload:
      if instance.admin_state == constants.ADMINST_UP:
        state = constants.INSTST_ERRORDOWN
      elif instance.admin_state == constants.ADMINST_DOWN:
        state = constants.INSTST_ADMINDOWN
      else:
        state = constants.INSTST_ADMINOFFLINE
      raise errors.OpExecError("Instance %s is not running (state %s)" %
                               (instance.name, state))

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

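    # Node specifications must be all-or-nothing: "nodes" holds one boolean
    # per required node, so either every instance provided its node(s) or the
    # iallocator has to place all of them.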
    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator and has_nodes:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = _ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        inst.pnode = _ExpandNodeName(self.cfg, inst.pnode)
        nodeslist.append(inst.pnode)
        if inst.snode is not None:
          inst.snode = _ExpandNodeName(self.cfg, inst.snode)
          nodeslist.append(inst.snode)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    cluster = self.cfg.GetClusterInfo()
    default_vg = self.cfg.GetVGName()
    ec_id = self.proc.GetECId()

    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
    else:
      node_whitelist = None

    insts = [_CreateInstanceAllocRequest(op, _ComputeDisks(op, default_vg),
                                         _ComputeNics(op, cluster, None,
                                                      self.cfg, ec_id),
                                         _ComputeFullBeParams(op, cluster),
                                         node_whitelist)
             for op in self.op.instances]

    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    (allocatable, failed) = self.ia_result
    return {
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
        map(compat.fst, allocatable),
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
      }

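  # Exec turns the iallocator result into jobs: every allocatable instance
  # becomes a single-opcode job with its primary (and, for mirrored disk
  # templates, secondary) node filled in, while instances that could not be
  # placed are only reported in the "failed" part of the result.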
  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
    (allocatable, failed) = self.ia_result

    jobs = []
    for (name, nodes) in allocatable:
      op = op2inst.pop(name)

      if len(nodes) > 1:
        (op.pnode, op.snode) = nodes
      else:
        (op.pnode,) = nodes

      jobs.append([op])

    missing = set(op2inst.keys()) - set(failed)
    assert not missing, \
      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


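# Illustrative note (not part of the original code): opcode-level container
# modifications such as
#   [(constants.DDM_ADD, -1, {"size": 1024}), (constants.DDM_REMOVE, 0, {})]
# are extended by PrepareContainerMods below with a per-modification private
# data object, yielding
#   [(constants.DDM_ADD, -1, {"size": 1024}, private),
#    (constants.DDM_REMOVE, 0, {}, private)]
# where each "private" is the result of private_fn() (or None).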
def PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]


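# This helper is used by LUInstanceSetParams below: when a new CPU mask is
# given, every node of the instance must expose at least (highest CPU index
# named in the mask + 1) physical CPUs, as reported by "cpu_total" in the
# node info RPC.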
def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node, num_cpus, requested),
                                 errors.ECODE_NORES)


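# Illustrative note (not part of the original code): for a container such as
# an instance's NIC list, the identifier "0" returns (0, container[0]), the
# special index "-1" selects the last item, and any non-numeric identifier is
# matched against the items' "uuid" and "name" attributes; everything else
# raises OpPrereqError with ECODE_NOENT.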
def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)


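# Illustrative note (not part of the original code): ApplyContainerMods is the
# generic engine behind disk/NIC changes in LUInstanceSetParams.  A prepared
# modification such as (constants.DDM_ADD, -1, params, private) appends an
# item built by create_fn, (constants.DDM_MODIFY, "<name-or-index>", params,
# private) updates an existing item via modify_fn, and a DDM_REMOVE entry
# deletes the item, recording a ("<kind>/<index>", "remove") change in
# chgdesc.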
def ApplyContainerMods(kind, container, chgdesc, mods,
                       create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)


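# Illustrative note (not part of the original code): _UpdateIvNames(2, disks)
# renames the given disk objects to "disk/2", "disk/3", ..., keeping iv_name
# consistent with each disk's final position in the instance's disk list.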
def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


class LUInstanceSetParams(LogicalUnit):
3031
  """Modifies an instances's parameters.
3032

3033
  """
3034
  HPATH = "instance-modify"
3035
  HTYPE = constants.HTYPE_INSTANCE
3036
  REQ_BGL = False
3037

    
3038
  @staticmethod
3039
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
3040
    assert ht.TList(mods)
3041
    assert not mods or len(mods[0]) in (2, 3)
3042

    
3043
    if mods and len(mods[0]) == 2:
3044
      result = []
3045

    
3046
      addremove = 0
3047
      for op, params in mods:
3048
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
3049
          result.append((op, -1, params))
3050
          addremove += 1
3051

    
3052
          if addremove > 1:
3053
            raise errors.OpPrereqError("Only one %s add or remove operation is"
3054
                                       " supported at a time" % kind,
3055
                                       errors.ECODE_INVAL)
3056
        else:
3057
          result.append((constants.DDM_MODIFY, op, params))
3058

    
3059
      assert verify_fn(result)
3060
    else:
3061
      result = mods
3062

    
3063
    return result
3064

    
3065
  @staticmethod
3066
  def _CheckMods(kind, mods, key_types, item_fn):
3067
    """Ensures requested disk/NIC modifications are valid.
3068

3069
    """
3070
    for (op, _, params) in mods:
3071
      assert ht.TDict(params)
3072

    
3073
      # If 'key_types' is an empty dict, we assume we have an
3074
      # 'ext' template and thus do not ForceDictType
3075
      if key_types:
3076
        utils.ForceDictType(params, key_types)
3077

    
3078
      if op == constants.DDM_REMOVE:
3079
        if params:
3080
          raise errors.OpPrereqError("No settings should be passed when"
3081
                                     " removing a %s" % kind,
3082
                                     errors.ECODE_INVAL)
3083
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
3084
        item_fn(op, params)
3085
      else:
3086
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
3087

    
3088
  @staticmethod
3089
  def _VerifyDiskModification(op, params):
3090
    """Verifies a disk modification.
3091

3092
    """
3093
    if op == constants.DDM_ADD:
3094
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
3095
      if mode not in constants.DISK_ACCESS_SET:
3096
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
3097
                                   errors.ECODE_INVAL)
3098

    
3099
      size = params.get(constants.IDISK_SIZE, None)
3100
      if size is None:
3101
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
3102
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
3103

    
3104
      try:
3105
        size = int(size)
3106
      except (TypeError, ValueError), err:
3107
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
3108
                                   errors.ECODE_INVAL)
3109

    
3110
      params[constants.IDISK_SIZE] = size
3111
      name = params.get(constants.IDISK_NAME, None)
3112
      if name is not None and name.lower() == constants.VALUE_NONE:
3113
        params[constants.IDISK_NAME] = None
3114

    
3115
    elif op == constants.DDM_MODIFY:
3116
      if constants.IDISK_SIZE in params:
3117
        raise errors.OpPrereqError("Disk size change not possible, use"
3118
                                   " grow-disk", errors.ECODE_INVAL)
3119
      if len(params) > 2:
3120
        raise errors.OpPrereqError("Disk modification doesn't support"
3121
                                   " additional arbitrary parameters",
3122
                                   errors.ECODE_INVAL)
3123
      name = params.get(constants.IDISK_NAME, None)
3124
      if name is not None and name.lower() == constants.VALUE_NONE:
3125
        params[constants.IDISK_NAME] = None
3126

    
3127
  @staticmethod
3128
  def _VerifyNicModification(op, params):
3129
    """Verifies a network interface modification.
3130

3131
    """
3132
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
3133
      ip = params.get(constants.INIC_IP, None)
3134
      name = params.get(constants.INIC_NAME, None)
3135
      req_net = params.get(constants.INIC_NETWORK, None)
3136
      link = params.get(constants.NIC_LINK, None)
3137
      mode = params.get(constants.NIC_MODE, None)
3138
      if name is not None and name.lower() == constants.VALUE_NONE:
3139
        params[constants.INIC_NAME] = None
3140
      if req_net is not None:
3141
        if req_net.lower() == constants.VALUE_NONE:
3142
          params[constants.INIC_NETWORK] = None
3143
          req_net = None
3144
        elif link is not None or mode is not None:
3145
          raise errors.OpPrereqError("If network is given"
3146
                                     " mode or link should not",
3147
                                     errors.ECODE_INVAL)
3148

    
3149
      if op == constants.DDM_ADD:
3150
        macaddr = params.get(constants.INIC_MAC, None)
3151
        if macaddr is None:
3152
          params[constants.INIC_MAC] = constants.VALUE_AUTO
3153

    
3154
      if ip is not None:
3155
        if ip.lower() == constants.VALUE_NONE:
3156
          params[constants.INIC_IP] = None
3157
        else:
3158
          if ip.lower() == constants.NIC_IP_POOL:
3159
            if op == constants.DDM_ADD and req_net is None:
3160
              raise errors.OpPrereqError("If ip=pool, parameter network"
3161
                                         " cannot be none",
3162
                                         errors.ECODE_INVAL)
3163
          else:
3164
            if not netutils.IPAddress.IsValid(ip):
3165
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
3166
                                         errors.ECODE_INVAL)
3167

    
3168
      if constants.INIC_MAC in params:
3169
        macaddr = params[constants.INIC_MAC]
3170
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
3171
          macaddr = utils.NormalizeAndValidateMac(macaddr)
3172

    
3173
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
3174
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
3175
                                     " modifying an existing NIC",
3176
                                     errors.ECODE_INVAL)
3177

    
3178
  def CheckArguments(self):
3179
    if not (self.op.nics or self.op.disks or self.op.disk_template or
3180
            self.op.hvparams or self.op.beparams or self.op.os_name or
3181
            self.op.offline is not None or self.op.runtime_mem or
3182
            self.op.pnode):
3183
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
3184

    
3185
    if self.op.hvparams:
3186
      _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
3187
                            "hypervisor", "instance", "cluster")
3188

    
3189
    self.op.disks = self._UpgradeDiskNicMods(
3190
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
3191
    self.op.nics = self._UpgradeDiskNicMods(
3192
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
3193

    
3194
    if self.op.disks and self.op.disk_template is not None:
3195
      raise errors.OpPrereqError("Disk template conversion and other disk"
3196
                                 " changes not supported at the same time",
3197
                                 errors.ECODE_INVAL)
3198

    
3199
    if (self.op.disk_template and
3200
        self.op.disk_template in constants.DTS_INT_MIRROR and
3201
        self.op.remote_node is None):
3202
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
3203
                                 " one requires specifying a secondary node",
3204
                                 errors.ECODE_INVAL)
3205

    
3206
    # Check NIC modifications
3207
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
3208
                    self._VerifyNicModification)
3209

    
3210
    if self.op.pnode:
3211
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
3212

    
3213
  def ExpandNames(self):
3214
    self._ExpandAndLockInstance()
3215
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
3216
    # Can't even acquire node locks in shared mode as upcoming changes in
3217
    # Ganeti 2.6 will start to modify the node object on disk conversion
3218
    self.needed_locks[locking.LEVEL_NODE] = []
3219
    self.needed_locks[locking.LEVEL_NODE_RES] = []
3220
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3221
    # Lock the node group in order to look up the ipolicy
3222
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
3223

    
3224
  def DeclareLocks(self, level):
3225
    if level == locking.LEVEL_NODEGROUP:
3226
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3227
      # Acquire locks for the instance's nodegroups optimistically. Needs
3228
      # to be verified in CheckPrereq
3229
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
3230
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3231
    elif level == locking.LEVEL_NODE:
3232
      self._LockInstancesNodes()
3233
      if self.op.disk_template and self.op.remote_node:
3234
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
3235
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
3236
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
3237
      # Copy node locks
3238
      self.needed_locks[locking.LEVEL_NODE_RES] = \
3239
        _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
3240

    
3241
  def BuildHooksEnv(self):
3242
    """Build hooks env.
3243

3244
    This runs on the master, primary and secondaries.
3245

3246
    """
3247
    args = {}
3248
    if constants.BE_MINMEM in self.be_new:
3249
      args["minmem"] = self.be_new[constants.BE_MINMEM]
3250
    if constants.BE_MAXMEM in self.be_new:
3251
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
3252
    if constants.BE_VCPUS in self.be_new:
3253
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
3254
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
3255
    # information at all.
3256

    
3257
    if self._new_nics is not None:
3258
      nics = []
3259

    
3260
      for nic in self._new_nics:
3261
        n = copy.deepcopy(nic)
3262
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
3263
        n.nicparams = nicparams
3264
        nics.append(_NICToTuple(self, n))
3265

    
3266
      args["nics"] = nics
3267

    
3268
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
3269
    if self.op.disk_template:
3270
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
3271
    if self.op.runtime_mem:
3272
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
3273

    
3274
    return env
3275

    
3276
  def BuildHooksNodes(self):
3277
    """Build hooks nodes.
3278

3279
    """
3280
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3281
    return (nl, nl)
3282

    
3283
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
3284
                              old_params, cluster, pnode):
3285

    
3286
    update_params_dict = dict([(key, params[key])
3287
                               for key in constants.NICS_PARAMETERS
3288
                               if key in params])
3289

    
3290
    req_link = update_params_dict.get(constants.NIC_LINK, None)
3291
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
3292

    
3293
    new_net_uuid = None
3294
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
3295
    if new_net_uuid_or_name:
3296
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
3297
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
3298

    
3299
    if old_net_uuid:
3300
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
3301

    
3302
    if new_net_uuid:
3303
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
3304
      if not netparams:
3305
        raise errors.OpPrereqError("No netparams found for the network"
3306
                                   " %s, probably not connected" %
3307
                                   new_net_obj.name, errors.ECODE_INVAL)
3308
      new_params = dict(netparams)
3309
    else:
3310
      new_params = _GetUpdatedParams(old_params, update_params_dict)
3311

    
3312
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
3313

    
3314
    new_filled_params = cluster.SimpleFillNIC(new_params)
3315
    objects.NIC.CheckParameterSyntax(new_filled_params)
3316

    
3317
    new_mode = new_filled_params[constants.NIC_MODE]
3318
    if new_mode == constants.NIC_MODE_BRIDGED:
3319
      bridge = new_filled_params[constants.NIC_LINK]
3320
      msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
3321
      if msg:
3322
        msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
3323
        if self.op.force:
3324
          self.warn.append(msg)
3325
        else:
3326
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
3327

    
3328
    elif new_mode == constants.NIC_MODE_ROUTED:
3329
      ip = params.get(constants.INIC_IP, old_ip)
3330
      if ip is None:
3331
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
3332
                                   " on a routed NIC", errors.ECODE_INVAL)
3333

    
3334
    elif new_mode == constants.NIC_MODE_OVS:
3335
      # TODO: check OVS link
3336
      self.LogInfo("OVS links are currently not checked for correctness")
3337

    
3338
    if constants.INIC_MAC in params:
3339
      mac = params[constants.INIC_MAC]
3340
      if mac is None:
3341
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
3342
                                   errors.ECODE_INVAL)
3343
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
3344
        # otherwise generate the MAC address
3345
        params[constants.INIC_MAC] = \
3346
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
3347
      else:
3348
        # or validate/reserve the current one
3349
        try:
3350
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
3351
        except errors.ReservationError:
3352
          raise errors.OpPrereqError("MAC address '%s' already in use"
3353
                                     " in cluster" % mac,
3354
                                     errors.ECODE_NOTUNIQUE)
3355
    elif new_net_uuid != old_net_uuid:
3356

    
3357
      def get_net_prefix(net_uuid):
3358
        mac_prefix = None
3359
        if net_uuid:
3360
          nobj = self.cfg.GetNetwork(net_uuid)
3361
          mac_prefix = nobj.mac_prefix
3362

    
3363
        return mac_prefix
3364

    
3365
      new_prefix = get_net_prefix(new_net_uuid)
3366
      old_prefix = get_net_prefix(old_net_uuid)
3367
      if old_prefix != new_prefix:
3368
        params[constants.INIC_MAC] = \
3369
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
3370

    
3371
    # if there is a change in (ip, network) tuple
3372
    new_ip = params.get(constants.INIC_IP, old_ip)
3373
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
3374
      if new_ip:
3375
        # if IP is pool then require a network and generate one IP
3376
        if new_ip.lower() == constants.NIC_IP_POOL:
3377
          if new_net_uuid:
3378
            try:
3379
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
3380
            except errors.ReservationError:
3381
              raise errors.OpPrereqError("Unable to get a free IP"
3382
                                         " from the address pool",
3383
                                         errors.ECODE_STATE)
3384
            self.LogInfo("Chose IP %s from network %s",
3385
                         new_ip,
3386
                         new_net_obj.name)
3387
            params[constants.INIC_IP] = new_ip
3388
          else:
3389
            raise errors.OpPrereqError("ip=pool, but no network found",
3390
                                       errors.ECODE_INVAL)
3391
        # Reserve new IP if in the new network if any
3392
        elif new_net_uuid:
3393
          try:
3394
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
3395
            self.LogInfo("Reserving IP %s in network %s",
3396
                         new_ip, new_net_obj.name)
3397
          except errors.ReservationError:
3398
            raise errors.OpPrereqError("IP %s not available in network %s" %
3399
                                       (new_ip, new_net_obj.name),
3400
                                       errors.ECODE_NOTUNIQUE)
3401
        # new network is None so check if new IP is a conflicting IP
3402
        elif self.op.conflicts_check:
3403
          _CheckForConflictingIp(self, new_ip, pnode)
3404

    
3405
      # release old IP if old network is not None
3406
      if old_ip and old_net_uuid:
3407
        try:
3408
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
3409
        except errors.AddressPoolError:
3410
          logging.warning("Release IP %s not contained in network %s",
3411
                          old_ip, old_net_obj.name)
3412

    
3413
    # there are no changes in (ip, network) tuple and old network is not None
3414
    elif (old_net_uuid is not None and
3415
          (req_link is not None or req_mode is not None)):
3416
      raise errors.OpPrereqError("Not allowed to change link or mode of"
3417
                                 " a NIC that is connected to a network",
3418
                                 errors.ECODE_INVAL)
3419

    
3420
    private.params = new_params
3421
    private.filled = new_filled_params
3422

    
3423
  def _PreCheckDiskTemplate(self, pnode_info):
3424
    """CheckPrereq checks related to a new disk template."""
3425
    # Arguments are passed to avoid configuration lookups
3426
    instance = self.instance
3427
    pnode = instance.primary_node
3428
    cluster = self.cluster
3429
    if instance.disk_template == self.op.disk_template:
3430
      raise errors.OpPrereqError("Instance already has disk template %s" %
3431
                                 instance.disk_template, errors.ECODE_INVAL)
3432

    
3433
    if (instance.disk_template,
3434
        self.op.disk_template) not in self._DISK_CONVERSIONS:
3435
      raise errors.OpPrereqError("Unsupported disk template conversion from"
3436
                                 " %s to %s" % (instance.disk_template,
3437
                                                self.op.disk_template),
3438
                                 errors.ECODE_INVAL)
3439
    _CheckInstanceState(self, instance, INSTANCE_DOWN,
3440
                        msg="cannot change disk template")
3441
    if self.op.disk_template in constants.DTS_INT_MIRROR:
3442
      if self.op.remote_node == pnode:
3443
        raise errors.OpPrereqError("Given new secondary node %s is the same"
3444
                                   " as the primary node of the instance" %
3445
                                   self.op.remote_node, errors.ECODE_STATE)
3446
      _CheckNodeOnline(self, self.op.remote_node)
3447
      _CheckNodeNotDrained(self, self.op.remote_node)
3448
      # FIXME: here we assume that the old instance type is DT_PLAIN
3449
      assert instance.disk_template == constants.DT_PLAIN
3450
      disks = [{constants.IDISK_SIZE: d.size,
3451
                constants.IDISK_VG: d.logical_id[0]}
3452
               for d in instance.disks]
3453
      required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
3454
      _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
3455

    
3456
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
3457
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
3458
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
3459
                                                              snode_group)
3460
      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
3461
                              ignore=self.op.ignore_ipolicy)
3462
      if pnode_info.group != snode_info.group:
3463
        self.LogWarning("The primary and secondary nodes are in two"
3464
                        " different node groups; the disk parameters"
3465
                        " from the first disk's node group will be"
3466
                        " used")
3467

    
3468
    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
3469
      # Make sure none of the nodes require exclusive storage
3470
      nodes = [pnode_info]
3471
      if self.op.disk_template in constants.DTS_INT_MIRROR:
3472
        assert snode_info
3473
        nodes.append(snode_info)
3474
      has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
3475
      if compat.any(map(has_es, nodes)):
3476
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
3477
                  " storage is enabled" % (instance.disk_template,
3478
                                           self.op.disk_template))
3479
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
3480

    
3481
  def CheckPrereq(self):
3482
    """Check prerequisites.
3483

3484
    This only checks the instance list against the existing names.
3485

3486
    """
3487
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
3488
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3489

    
3490
    cluster = self.cluster = self.cfg.GetClusterInfo()
3491
    assert self.instance is not None, \
3492
      "Cannot retrieve locked instance %s" % self.op.instance_name
3493

    
3494
    pnode = instance.primary_node
3495

    
3496
    self.warn = []
3497

    
3498
    if (self.op.pnode is not None and self.op.pnode != pnode and
3499
        not self.op.force):
3500
      # verify that the instance is not up
3501
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
3502
                                                  instance.hypervisor)
3503
      if instance_info.fail_msg:
3504
        self.warn.append("Can't get instance runtime information: %s" %
3505
                         instance_info.fail_msg)
3506
      elif instance_info.payload:
3507
        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
3508
                                   errors.ECODE_STATE)
3509

    
3510
    assert pnode in self.owned_locks(locking.LEVEL_NODE)
3511
    nodelist = list(instance.all_nodes)
3512
    pnode_info = self.cfg.GetNodeInfo(pnode)
3513
    self.diskparams = self.cfg.GetInstanceDiskParams(instance)
3514

    
3515
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
3516
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
3517
    group_info = self.cfg.GetNodeGroup(pnode_info.group)
3518

    
3519
    # dictionary with instance information after the modification
3520
    ispec = {}
3521

    
3522
    # Check disk modifications. This is done here and not in CheckArguments
3523
    # (as with NICs), because we need to know the instance's disk template
3524
    if instance.disk_template == constants.DT_EXT:
3525
      self._CheckMods("disk", self.op.disks, {},
3526
                      self._VerifyDiskModification)
3527
    else:
3528
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
3529
                      self._VerifyDiskModification)
3530

    
3531
    # Prepare disk/NIC modifications
3532
    self.diskmod = PrepareContainerMods(self.op.disks, None)
3533
    self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
3534

    
3535
    # Check the validity of the `provider' parameter
3536
    if instance.disk_template in constants.DT_EXT:
3537
      for mod in self.diskmod:
3538
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
3539
        if mod[0] == constants.DDM_ADD:
3540
          if ext_provider is None:
3541
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
3542
                                       " '%s' missing, during disk add" %
3543
                                       (constants.DT_EXT,
3544
                                        constants.IDISK_PROVIDER),
3545
                                       errors.ECODE_NOENT)
3546
        elif mod[0] == constants.DDM_MODIFY:
3547
          if ext_provider:
3548
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
3549
                                       " modification" %
3550
                                       constants.IDISK_PROVIDER,
3551
                                       errors.ECODE_INVAL)
3552
    else:
3553
      for mod in self.diskmod:
3554
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
3555
        if ext_provider is not None:
3556
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
3557
                                     " instances of type '%s'" %
3558
                                     (constants.IDISK_PROVIDER,
3559
                                      constants.DT_EXT),
3560
                                     errors.ECODE_INVAL)
3561

    
3562
    # OS change
3563
    if self.op.os_name and not self.op.force:
3564
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
3565
                      self.op.force_variant)
3566
      instance_os = self.op.os_name
3567
    else:
3568
      instance_os = instance.os
3569

    
3570
    assert not (self.op.disk_template and self.op.disks), \
3571
      "Can't modify disk template and apply disk changes at the same time"
3572

    
3573
    if self.op.disk_template:
3574
      self._PreCheckDiskTemplate(pnode_info)
3575

    
3576
    # hvparams processing
3577
    if self.op.hvparams:
3578
      hv_type = instance.hypervisor
3579
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
3580
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
3581
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
3582

    
3583
      # local check
3584
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
3585
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
3586
      self.hv_proposed = self.hv_new = hv_new # the new actual values
3587
      self.hv_inst = i_hvdict # the new dict (without defaults)
3588
    else:
3589
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
3590
                                              instance.hvparams)
3591
      self.hv_new = self.hv_inst = {}
3592

    
3593
    # beparams processing
3594
    if self.op.beparams:
3595
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
3596
                                   use_none=True)
3597
      objects.UpgradeBeParams(i_bedict)
3598
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
3599
      be_new = cluster.SimpleFillBE(i_bedict)
3600
      self.be_proposed = self.be_new = be_new # the new actual values
3601
      self.be_inst = i_bedict # the new dict (without defaults)
3602
    else:
3603
      self.be_new = self.be_inst = {}
3604
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
3605
    be_old = cluster.FillBE(instance)
3606

    
3607
    # CPU param validation -- checking every time a parameter is
3608
    # changed to cover all cases where either CPU mask or vcpus have
3609
    # changed
3610
    if (constants.BE_VCPUS in self.be_proposed and
3611
        constants.HV_CPU_MASK in self.hv_proposed):
3612
      cpu_list = \
3613
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
3614
      # Verify mask is consistent with number of vCPUs. Can skip this
3615
      # test if only 1 entry in the CPU mask, which means same mask
3616
      # is applied to all vCPUs.
3617
      if (len(cpu_list) > 1 and
3618
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
3619
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
3620
                                   " CPU mask [%s]" %
3621
                                   (self.be_proposed[constants.BE_VCPUS],
3622
                                    self.hv_proposed[constants.HV_CPU_MASK]),
3623
                                   errors.ECODE_INVAL)
3624

    
3625
      # Only perform this test if a new CPU mask is given
3626
      if constants.HV_CPU_MASK in self.hv_new:
3627
        # Calculate the largest CPU number requested
3628
        max_requested_cpu = max(map(max, cpu_list))
3629
        # Check that all of the instance's nodes have enough physical CPUs to
3630
        # satisfy the requested CPU mask
3631
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
3632
                                max_requested_cpu + 1, instance.hypervisor)
3633

    
3634
    # osparams processing
3635
    if self.op.osparams:
3636
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
3637
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
3638
      self.os_inst = i_osdict # the new dict (without defaults)
3639
    else:
3640
      self.os_inst = {}
3641

    
3642
    #TODO(dynmem): do the appropriate check involving MINMEM
3643
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
3644
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
3645
      mem_check_list = [pnode]
3646
      if be_new[constants.BE_AUTO_BALANCE]:
3647
        # either we changed auto_balance to yes or it was from before
3648
        mem_check_list.extend(instance.secondary_nodes)
3649
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
3650
                                                  instance.hypervisor)
3651
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
3652
                                         [instance.hypervisor], False)
3653
      pninfo = nodeinfo[pnode]
3654
      msg = pninfo.fail_msg
3655
      if msg:
3656
        # Assume the primary node is unreachable and go ahead
3657
        self.warn.append("Can't get info from primary node %s: %s" %
3658
                         (pnode, msg))
3659
      else:
3660
        (_, _, (pnhvinfo, )) = pninfo.payload
3661
        if not isinstance(pnhvinfo.get("memory_free", None), int):
3662
          self.warn.append("Node data from primary node %s doesn't contain"
3663
                           " free memory information" % pnode)
3664
        elif instance_info.fail_msg:
3665
          self.warn.append("Can't get instance runtime information: %s" %
3666
                           instance_info.fail_msg)
3667
        else:
3668
          if instance_info.payload:
3669
            current_mem = int(instance_info.payload["memory"])
3670
          else:
3671
            # Assume instance not running
3672
            # (there is a slight race condition here, but it's not very
3673
            # probable, and we have no other way to check)
3674
            # TODO: Describe race condition
3675
            current_mem = 0
3676
          #TODO(dynmem): do the appropriate check involving MINMEM
3677
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
3678
                      pnhvinfo["memory_free"])
3679
          if miss_mem > 0:
3680
            raise errors.OpPrereqError("This change will prevent the instance"
3681
                                       " from starting, due to %d MB of memory"
3682
                                       " missing on its primary node" %
3683
                                       miss_mem, errors.ECODE_NORES)
3684

    
3685
      if be_new[constants.BE_AUTO_BALANCE]:
3686
        for node, nres in nodeinfo.items():
3687
          if node not in instance.secondary_nodes:
3688
            continue
3689
          nres.Raise("Can't get info from secondary node %s" % node,
3690
                     prereq=True, ecode=errors.ECODE_STATE)
3691
          (_, _, (nhvinfo, )) = nres.payload
3692
          if not isinstance(nhvinfo.get("memory_free", None), int):
3693
            raise errors.OpPrereqError("Secondary node %s didn't return free"
3694
                                       " memory information" % node,
3695
                                       errors.ECODE_STATE)
3696
          #TODO(dynmem): do the appropriate check involving MINMEM
3697
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
3698
            raise errors.OpPrereqError("This change will prevent the instance"
3699
                                       " from failover to its secondary node"
3700
                                       " %s, due to not enough memory" % node,
3701
                                       errors.ECODE_STATE)
3702

    
3703
    if self.op.runtime_mem:
3704
      remote_info = self.rpc.call_instance_info(instance.primary_node,
3705
                                                instance.name,
3706
                                                instance.hypervisor)
3707
      remote_info.Raise("Error checking node %s" % instance.primary_node)
3708
      if not remote_info.payload: # not running already
3709
        raise errors.OpPrereqError("Instance %s is not running" %
3710
                                   instance.name, errors.ECODE_STATE)
3711

    
3712
      current_memory = remote_info.payload["memory"]
3713
      if (not self.op.force and
3714
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
3715
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
3716
        raise errors.OpPrereqError("Instance %s must have memory between %d"
3717
                                   " and %d MB of memory unless --force is"
3718
                                   " given" %
3719
                                   (instance.name,
3720
                                    self.be_proposed[constants.BE_MINMEM],
3721
                                    self.be_proposed[constants.BE_MAXMEM]),
3722
                                   errors.ECODE_INVAL)
3723

    
3724
      delta = self.op.runtime_mem - current_memory
3725
      if delta > 0:
3726
        _CheckNodeFreeMemory(self, instance.primary_node,
3727
                             "ballooning memory for instance %s" %
3728
                             instance.name, delta, instance.hypervisor)
3729

    
3730
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
3731
      raise errors.OpPrereqError("Disk operations not supported for"
3732
                                 " diskless instances", errors.ECODE_INVAL)
3733

    
3734
    def _PrepareNicCreate(_, params, private):
3735
      self._PrepareNicModification(params, private, None, None,
3736
                                   {}, cluster, pnode)
3737
      return (None, None)
3738

    
3739
    def _PrepareNicMod(_, nic, params, private):
3740
      self._PrepareNicModification(params, private, nic.ip, nic.network,
3741
                                   nic.nicparams, cluster, pnode)
3742
      return None
3743

    
3744
    def _PrepareNicRemove(_, params, __):
3745
      ip = params.ip
3746
      net = params.network
3747
      if net is not None and ip is not None:
3748
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3749

    
3750
    # Verify NIC changes (operating on copy)
3751
    nics = instance.nics[:]
3752
    ApplyContainerMods("NIC", nics, None, self.nicmod,
3753
                       _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3754
    if len(nics) > constants.MAX_NICS:
3755
      raise errors.OpPrereqError("Instance has too many network interfaces"
3756
                                 " (%d), cannot add more" % constants.MAX_NICS,
3757
                                 errors.ECODE_STATE)
3758

    
3759
    def _PrepareDiskMod(_, disk, params, __):
3760
      disk.name = params.get(constants.IDISK_NAME, None)
3761

    
3762
    # Verify disk changes (operating on a copy)
3763
    disks = copy.deepcopy(instance.disks)
3764
    ApplyContainerMods("disk", disks, None, self.diskmod, None, _PrepareDiskMod,
3765
                       None)
3766
    utils.ValidateDeviceNames("disk", disks)
3767
    if len(disks) > constants.MAX_DISKS:
3768
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
3769
                                 " more" % constants.MAX_DISKS,
3770
                                 errors.ECODE_STATE)
3771
    disk_sizes = [disk.size for disk in instance.disks]
3772
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
3773
                      self.diskmod if op == constants.DDM_ADD)
3774
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
3775
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
3776

    
3777
    if self.op.offline is not None and self.op.offline:
3778
      _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
3779
                          msg="can't change to offline")
3780

    
3781
    # Pre-compute NIC changes (necessary to use result in hooks)
3782
    self._nic_chgdesc = []
3783
    if self.nicmod:
3784
      # Operate on copies as this is still in prereq
3785
      nics = [nic.Copy() for nic in instance.nics]
3786
      ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3787
                         self._CreateNewNic, self._ApplyNicMods, None)
3788
      # Verify that NIC names are unique and valid
3789
      utils.ValidateDeviceNames("NIC", nics)
3790
      self._new_nics = nics
3791
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3792
    else:
3793
      self._new_nics = None
3794
      ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
3795

    
3796
    if not self.op.ignore_ipolicy:
3797
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
3798
                                                              group_info)
3799

    
3800
      # Fill ispec with backend parameters
3801
      ispec[constants.ISPEC_SPINDLE_USE] = \
3802
        self.be_new.get(constants.BE_SPINDLE_USE, None)
3803
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3804
                                                         None)
3805

    
3806
      # Copy ispec to verify parameters with min/max values separately
3807
      if self.op.disk_template:
3808
        new_disk_template = self.op.disk_template
3809
      else:
3810
        new_disk_template = instance.disk_template
3811
      ispec_max = ispec.copy()
3812
      ispec_max[constants.ISPEC_MEM_SIZE] = \
3813
        self.be_new.get(constants.BE_MAXMEM, None)
3814
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3815
                                                     new_disk_template)
3816
      ispec_min = ispec.copy()
3817
      ispec_min[constants.ISPEC_MEM_SIZE] = \
3818
        self.be_new.get(constants.BE_MINMEM, None)
3819
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3820
                                                     new_disk_template)
3821

    
3822
      if (res_max or res_min):
3823
        # FIXME: Improve error message by including information about whether
3824
        # the upper or lower limit of the parameter fails the ipolicy.
3825
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3826
               (group_info, group_info.name,
3827
                utils.CommaJoin(set(res_max + res_min))))
3828
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3829

    
3830
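  # _ConvertPlainToDrbd below implements the plain -> drbd8 conversion: it
  # generates a DRBD layout for the existing volumes, creates the missing
  # meta/data block devices on both nodes, renames the original LVs so they
  # become the DRBD data children, assembles the DRBD devices (rolling the
  # rename back on failure), updates the configuration and waits for the
  # mirrors to sync.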
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    assert instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in instance.disks]
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
                                      instance.name, pnode, [snode],
                                      disk_info, None, None, 0, feedback_fn,
                                      self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
    s_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, snode)
    info = _GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
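    # (the primary node already has the data LVs from the plain template, so
    # only the DRBD metadata LV is created there; the secondary node gets
    # both the data and the metadata LV)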
    for disk in anno_disks:
      # unfortunately this is... not too nice
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                            info, True, p_excl_stor)
      for child in disk.children:
        _CreateSingleBlockDev(self, snode, instance, child, info, True,
                              s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
          f_create = node == pnode
          _CreateSingleBlockDev(self, node, instance, disk, info, f_create,
                                excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # Release node locks while waiting for sync
    _ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not _WaitForSync(self, instance,
                                  oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance

    assert len(instance.secondary_nodes) == 1
    assert instance.disk_template == constants.DT_DRBD8

    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

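    # each DRBD8 disk has two children (the data LV and the metadata LV);
    # the data LV is kept and becomes the new plain disk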
    old_disks = _AnnotateDiskParams(instance, instance.disks, self.cfg)
    new_disks = [d.children[0] for d in instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, instance.disks)
    self.cfg.Update(instance, feedback_fn)

    # Release locks in case removing disks takes a while
    _ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    instance = self.instance

    # add a new disk
    if instance.disk_template in constants.DTS_FILEBASED:
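      # new file-based disks reuse the directory of the instance's first disk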
      (file_driver, file_path) = instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      _GenerateDiskTemplate(self, instance.disk_template, instance.name,
                            instance.primary_node, instance.secondary_nodes,
                            [params], file_path, file_driver, idx,
                            self.Log, self.diskparams)[0]

    info = _GetInstanceInfoText(instance)

    logging.info("Creating volume %s for instance %s",
                 disk.iv_name, instance.name)
    # Note: this needs to be kept in sync with _CreateDisks
    #HARDCODE
    for node in instance.all_nodes:
      f_create = (node == instance.primary_node)
      try:
        _CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
      except errors.OpExecError, err:
        self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
                        disk.iv_name, disk, node, err)

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      _WipeDisks(self, instance,
                 disks=[(idx, disk, 0)])

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
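    # Example (hypothetical values): for idx=2 and
    # params={constants.IDISK_MODE: "ro"} this returns
    # [("disk.mode/2", "ro")].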
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    if constants.IDISK_NAME in params:
      disk.name = params[constants.IDISK_NAME]
      changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = _AnnotateDiskParams(self.instance, [root], self.cfg)
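    # remove the disk from every node on which (part of) it is present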
    for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
      self.cfg.SetDiskID(disk, node)
      msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx, node, msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

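    # Example (hypothetical values): params={constants.INIC_IP: "192.0.2.10"}
    # for idx=0 records the change ("nic.ip/0", "192.0.2.10") and updates the
    # NIC object in place.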
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

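    # node resource locks are held if and only if a disk template conversion
    # was requested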
    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []
    instance = self.instance

    # New primary node
    if self.op.pnode:
      instance.primary_node = self.op.pnode

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
                                                     instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    ApplyContainerMods("disk", instance.disks, result, self.diskmod,
                       self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
    _UpdateIvNames(0, instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(instance.all_nodes)
        if self.op.remote_node:
          check_nodes.add(self.op.remote_node)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = _ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
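      # look up the conversion method by (current template, new template)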
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    _ReleaseLocks(self, locking.LEVEL_NODE)
    _ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(instance.name)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(instance.name)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

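  # maps (current disk template, requested disk template) to the method
  # performing the conversion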
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
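  """Moves an instance to another node group.

  """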
  HPATH = "instance-change-group"
4208
  HTYPE = constants.HTYPE_INSTANCE
4209
  REQ_BGL = False
4210

    
4211
  def ExpandNames(self):
4212
    self.share_locks = _ShareAll()
4213

    
4214
    self.needed_locks = {
4215
      locking.LEVEL_NODEGROUP: [],
4216
      locking.LEVEL_NODE: [],
4217
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
4218
      }
4219

    
4220
    self._ExpandAndLockInstance()
4221

    
4222
    if self.op.target_groups:
4223
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
4224
                                  self.op.target_groups)
4225
    else:
4226
      self.req_target_uuids = None
4227

    
4228
    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
4229

    
4230
  def DeclareLocks(self, level):
4231
    if level == locking.LEVEL_NODEGROUP:
4232
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
4233

    
4234
      if self.req_target_uuids:
4235
        lock_groups = set(self.req_target_uuids)
4236

    
4237
        # Lock all groups used by instance optimistically; this requires going
4238
        # via the node before it's locked, requiring verification later on
4239
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
4240
        lock_groups.update(instance_groups)
4241
      else:
4242
        # No target groups, need to lock all of them
4243
        lock_groups = locking.ALL_SET
4244

    
4245
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
4246

    
4247
    elif level == locking.LEVEL_NODE:
4248
      if self.req_target_uuids:
4249
        # Lock all nodes used by instances
4250
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4251
        self._LockInstancesNodes()
4252

    
4253
        # Lock all nodes in all potential target groups
4254
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
4255
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
4256
        member_nodes = [node_name
4257
                        for group in lock_groups
4258
                        for node_name in self.cfg.GetNodeGroup(group).members]
4259
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
4260
      else:
4261
        # Lock all nodes as all groups are potential targets
4262
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4263

    
4264
  def CheckPrereq(self):
4265
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
4266
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
4267
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
4268

    
4269
    assert (self.req_target_uuids is None or
4270
            owned_groups.issuperset(self.req_target_uuids))
4271
    assert owned_instances == set([self.op.instance_name])
4272

    
4273
    # Get instance information
4274
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4275

    
4276
    # Check if node groups for locked instance are still correct
4277
    assert owned_nodes.issuperset(self.instance.all_nodes), \
4278
      ("Instance %s's nodes changed while we kept the lock" %
4279
       self.op.instance_name)
4280

    
4281
    inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
4282
                                           owned_groups)
4283

    
4284
    if self.req_target_uuids:
4285
      # User requested specific target groups
4286
      self.target_uuids = frozenset(self.req_target_uuids)
4287
    else:
4288
      # All groups except those used by the instance are potential targets
4289
      self.target_uuids = owned_groups - inst_groups
4290

    
4291
    conflicting_groups = self.target_uuids & inst_groups
4292
    if conflicting_groups:
4293
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
4294
                                 " used by the instance '%s'" %
4295
                                 (utils.CommaJoin(conflicting_groups),
4296
                                  self.op.instance_name),
4297
                                 errors.ECODE_INVAL)
4298

    
4299
    if not self.target_uuids:
4300
      raise errors.OpPrereqError("There are no possible target groups",
4301
                                 errors.ECODE_INVAL)
4302

    
4303
  def BuildHooksEnv(self):
4304
    """Build hooks env.
4305

4306
    """
4307
    assert self.target_uuids
4308

    
4309
    env = {
4310
      "TARGET_GROUPS": " ".join(self.target_uuids),
4311
      }
4312

    
4313
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4314

    
4315
    return env
4316

    
4317
  def BuildHooksNodes(self):
4318
    """Build hooks nodes.
4319

4320
    """
4321
    mn = self.cfg.GetMasterNode()
4322
    return ([mn], [mn])
4323

    
4324
  def Exec(self, feedback_fn):
4325
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
4326

    
4327
    assert instances == [self.op.instance_name], "Instance not locked"
4328

    
4329
    req = iallocator.IAReqGroupChange(instances=instances,
4330
                                      target_groups=list(self.target_uuids))
4331
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
4332

    
4333
    ial.Run(self.op.iallocator)
4334

    
4335
    if not ial.success:
4336
      raise errors.OpPrereqError("Can't compute solution for changing group of"
4337
                                 " instance '%s' using iallocator '%s': %s" %
4338
                                 (self.op.instance_name, self.op.iallocator,
4339
                                  ial.info), errors.ECODE_NORES)
4340

    
4341
    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
4342

    
4343
    self.LogInfo("Iallocator returned %s job(s) for changing group of"
4344
                 " instance '%s'", len(jobs), self.op.instance_name)
4345

    
4346
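    # this LU only computes the necessary jobs; the actual group change is
    # performed by those jobs once they are submitted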
    return ResultWithJobs(jobs)