lib/cmdlib.py @ ec0292f1
1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the master-side code."""
23

    
24
# pylint: disable-msg=W0613,W0201
25

    
26
import os
27
import os.path
28
import sha
29
import time
30
import tempfile
31
import re
32
import platform
33
import logging
34
import copy
35
import random
36

    
37
from ganeti import ssh
38
from ganeti import utils
39
from ganeti import errors
40
from ganeti import hypervisor
41
from ganeti import locking
42
from ganeti import constants
43
from ganeti import objects
44
from ganeti import opcodes
45
from ganeti import serializer
46
from ganeti import ssconf
47

    
48

    
49
class LogicalUnit(object):
50
  """Logical Unit base class.
51

52
  Subclasses must follow these rules:
53
    - implement ExpandNames
54
    - implement CheckPrereq
55
    - implement Exec
56
    - implement BuildHooksEnv
57
    - redefine HPATH and HTYPE
58
    - optionally redefine their run requirements:
59
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
60

61
  Note that all commands require root permissions.
62

63
  """
64
  HPATH = None
65
  HTYPE = None
66
  _OP_REQP = []
67
  REQ_BGL = True
68

    
69
  def __init__(self, processor, op, context, rpc):
70
    """Constructor for LogicalUnit.
71

72
    This needs to be overridden in derived classes in order to check op
73
    validity.
74

75
    """
76
    self.proc = processor
77
    self.op = op
78
    self.cfg = context.cfg
79
    self.context = context
80
    self.rpc = rpc
81
    # Dicts used to declare locking needs to mcpu
82
    self.needed_locks = None
83
    self.acquired_locks = {}
84
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
85
    self.add_locks = {}
86
    self.remove_locks = {}
87
    # Used to force good behavior when calling helper functions
88
    self.recalculate_locks = {}
89
    self.__ssh = None
90
    # logging
91
    self.LogWarning = processor.LogWarning
92
    self.LogInfo = processor.LogInfo
93

    
94
    for attr_name in self._OP_REQP:
95
      attr_val = getattr(op, attr_name, None)
96
      if attr_val is None:
97
        raise errors.OpPrereqError("Required parameter '%s' missing" %
98
                                   attr_name)
99
    self.CheckArguments()
100

    
101
  def __GetSSH(self):
102
    """Returns the SshRunner object
103

104
    """
105
    if not self.__ssh:
106
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
107
    return self.__ssh
108

    
109
  ssh = property(fget=__GetSSH)
110

    
111
  def CheckArguments(self):
112
    """Check syntactic validity for the opcode arguments.
113

114
    This method is for doing a simple syntactic check, ensuring the
115
    validity of opcode parameters, without any cluster-related
116
    checks. While the same can be accomplished in ExpandNames and/or
117
    CheckPrereq, doing these checks separately is better because:
118

119
      - ExpandNames is left as purely a lock-related function
120
      - CheckPrereq is run after we have acquired locks (and possibly
121
        waited for them)
122

123
    The function is allowed to change the self.op attribute so that
124
    later methods need no longer worry about missing parameters.
125

126
    """
127
    pass
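  # Illustrative sketch, not from the original module: a subclass could use
  # CheckArguments to default an optional opcode slot so that later methods
  # need not worry about it being missing; the "force" slot is hypothetical.
  #
  #   def CheckArguments(self):
  #     if not hasattr(self.op, "force"):
  #       self.op.force = False
  #     elif not isinstance(self.op.force, bool):
  #       raise errors.OpPrereqError("Parameter 'force' must be a boolean")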
128

    
129
  def ExpandNames(self):
130
    """Expand names for this LU.
131

132
    This method is called before starting to execute the opcode, and it should
133
    update all the parameters of the opcode to their canonical form (e.g. a
134
    short node name must be fully expanded after this method has successfully
135
    completed). This way locking, hooks, logging, etc. can work correctly.
136

137
    LUs which implement this method must also populate the self.needed_locks
138
    member, as a dict with lock levels as keys, and a list of needed lock names
139
    as values. Rules:
140

141
      - use an empty dict if you don't need any lock
142
      - if you don't need any lock at a particular level omit that level
143
      - don't put anything for the BGL level
144
      - if you want all locks at a level use locking.ALL_SET as a value
145

146
    If you need to share locks (rather than acquire them exclusively) at one
147
    level you can modify self.share_locks, setting a true value (usually 1) for
148
    that level. By default locks are not shared.
149

150
    Examples::
151

152
      # Acquire all nodes and one instance
153
      self.needed_locks = {
154
        locking.LEVEL_NODE: locking.ALL_SET,
155
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
156
      }
157
      # Acquire just two nodes
158
      self.needed_locks = {
159
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
160
      }
161
      # Acquire no locks
162
      self.needed_locks = {} # No, you can't leave it to the default value None
163

164
    """
165
    # The implementation of this method is mandatory only if the new LU is
166
    # concurrent, so that old LUs don't need to be changed all at the same
167
    # time.
168
    if self.REQ_BGL:
169
      self.needed_locks = {} # Exclusive LUs don't need locks.
170
    else:
171
      raise NotImplementedError
172

    
173
  def DeclareLocks(self, level):
174
    """Declare LU locking needs for a level
175

176
    While most LUs can just declare their locking needs at ExpandNames time,
177
    sometimes there's the need to calculate some locks after having acquired
178
    the ones before. This function is called just before acquiring locks at a
179
    particular level, but after acquiring the ones at lower levels, and permits
180
    such calculations. It can be used to modify self.needed_locks, and by
181
    default it does nothing.
182

183
    This function is only called if you have something already set in
184
    self.needed_locks for the level.
185

186
    @param level: Locking level which is going to be locked
187
    @type level: member of ganeti.locking.LEVELS
188

189
    """
190

    
191
  def CheckPrereq(self):
192
    """Check prerequisites for this LU.
193

194
    This method should check that the prerequisites for the execution
195
    of this LU are fulfilled. It can do internode communication, but
196
    it should be idempotent - no cluster or system changes are
197
    allowed.
198

199
    The method should raise errors.OpPrereqError in case something is
200
    not fulfilled. Its return value is ignored.
201

202
    This method should also update all the parameters of the opcode to
203
    their canonical form if it hasn't been done by ExpandNames before.
204

205
    """
206
    raise NotImplementedError
207

    
208
  def Exec(self, feedback_fn):
209
    """Execute the LU.
210

211
    This method should implement the actual work. It should raise
212
    errors.OpExecError for failures that are somewhat dealt with in
213
    code, or expected.
214

215
    """
216
    raise NotImplementedError
217

    
218
  def BuildHooksEnv(self):
219
    """Build hooks environment for this LU.
220

221
    This method should return a three-element tuple consisting of: a dict
222
    containing the environment that will be used for running the
223
    specific hook for this LU, a list of node names on which the hook
224
    should run before the execution, and a list of node names on which
225
    the hook should run after the execution.
226

227
    The keys of the dict must not be prefixed with 'GANETI_', as this will
228
    be handled in the hooks runner. Also note additional keys will be
229
    added by the hooks runner. If the LU doesn't define any
230
    environment, an empty dict (and not None) should be returned.
231

232
    "No nodes" should be represented as an empty list (and not None).
233

234
    Note that if the HPATH for a LU class is None, this function will
235
    not be called.
236

237
    """
238
    raise NotImplementedError
239

    
240
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
241
    """Notify the LU about the results of its hooks.
242

243
    This method is called every time a hooks phase is executed, and notifies
244
    the Logical Unit about the hooks' result. The LU can then use it to alter
245
    its result based on the hooks.  By default the method does nothing and the
246
    previous result is passed back unchanged, but any LU can override it if it
247
    wants to use the local cluster hook-scripts somehow.
248

249
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
250
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
251
    @param hook_results: the results of the multi-node hooks rpc call
252
    @param feedback_fn: function used to send feedback back to the caller
253
    @param lu_result: the previous Exec result this LU had, or None
254
        in the PRE phase
255
    @return: the new Exec result, based on the previous result
256
        and hook results
257

258
    """
259
    return lu_result
260

    
261
  def _ExpandAndLockInstance(self):
262
    """Helper function to expand and lock an instance.
263

264
    Many LUs that work on an instance take its name in self.op.instance_name
265
    and need to expand it and then declare the expanded name for locking. This
266
    function does it, and then updates self.op.instance_name to the expanded
267
    name. It also initializes needed_locks as a dict, if this hasn't been done
268
    before.
269

270
    """
271
    if self.needed_locks is None:
272
      self.needed_locks = {}
273
    else:
274
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
275
        "_ExpandAndLockInstance called with instance-level locks set"
276
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
277
    if expanded_name is None:
278
      raise errors.OpPrereqError("Instance '%s' not known" %
279
                                  self.op.instance_name)
280
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
281
    self.op.instance_name = expanded_name
282

    
283
  def _LockInstancesNodes(self, primary_only=False):
284
    """Helper function to declare instances' nodes for locking.
285

286
    This function should be called after locking one or more instances to lock
287
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
288
    with all primary or secondary nodes for instances already locked and
289
    present in self.needed_locks[locking.LEVEL_INSTANCE].
290

291
    It should be called from DeclareLocks, and for safety only works if
292
    self.recalculate_locks[locking.LEVEL_NODE] is set.
293

294
    In the future it may grow parameters to just lock some instances' nodes, or
295
    to just lock primaries or secondary nodes, if needed.
296

297
    It should be called in DeclareLocks in a way similar to::
298

299
      if level == locking.LEVEL_NODE:
300
        self._LockInstancesNodes()
301

302
    @type primary_only: boolean
303
    @param primary_only: only lock primary nodes of locked instances
304

305
    """
306
    assert locking.LEVEL_NODE in self.recalculate_locks, \
307
      "_LockInstancesNodes helper function called with no nodes to recalculate"
308

    
309
    # TODO: check if we've really been called with the instance locks held
310

    
311
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
312
    # future we might want to have different behaviors depending on the value
313
    # of self.recalculate_locks[locking.LEVEL_NODE]
314
    wanted_nodes = []
315
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
316
      instance = self.context.cfg.GetInstanceInfo(instance_name)
317
      wanted_nodes.append(instance.primary_node)
318
      if not primary_only:
319
        wanted_nodes.extend(instance.secondary_nodes)
320

    
321
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
322
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
323
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
324
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
325

    
326
    del self.recalculate_locks[locking.LEVEL_NODE]
327

    
328
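# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: a minimal LU that
# follows the rules from the LogicalUnit docstring above (ExpandNames,
# DeclareLocks, CheckPrereq, Exec, BuildHooksEnv, HPATH/HTYPE, REQ_BGL).
# The class name, the HPATH value and the opcode slot "instance_name" are
# hypothetical; a real LU is backed by an opcode and registered in mcpu.
# ---------------------------------------------------------------------------

class LUExampleNoop(LogicalUnit):
  """Example LU: lock one instance and its nodes, then do nothing.

  """
  HPATH = "instance-example-noop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # canonicalize self.op.instance_name and declare the instance lock
    self._ExpandAndLockInstance()
    # node locks are computed in DeclareLocks, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    env = {"OP_TARGET": self.op.instance_name}
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    feedback_fn("Would operate on instance %s" % self.instance.name)
    return True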

    
329
class NoHooksLU(LogicalUnit):
330
  """Simple LU which runs no hooks.
331

332
  This LU is intended as a parent for other LogicalUnits which will
333
  run no hooks, in order to reduce duplicate code.
334

335
  """
336
  HPATH = None
337
  HTYPE = None
338

    
339

    
340
def _GetWantedNodes(lu, nodes):
341
  """Returns list of checked and expanded node names.
342

343
  @type lu: L{LogicalUnit}
344
  @param lu: the logical unit on whose behalf we execute
345
  @type nodes: list
346
  @param nodes: non-empty list of node names to check and expand
347
  @rtype: list
348
  @return: the list of nodes, sorted
349
  @raise errors.OpPrereqError: if the nodes parameter is not a list
  @raise errors.ProgrammerError: if the nodes list is empty
350

351
  """
352
  if not isinstance(nodes, list):
353
    raise errors.OpPrereqError("Invalid argument type 'nodes'")
354

    
355
  if not nodes:
356
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
357
      " non-empty list of nodes whose name is to be expanded.")
358

    
359
  wanted = []
360
  for name in nodes:
361
    node = lu.cfg.ExpandNodeName(name)
362
    if node is None:
363
      raise errors.OpPrereqError("No such node name '%s'" % name)
364
    wanted.append(node)
365

    
366
  return utils.NiceSort(wanted)
367

    
368

    
369
def _GetWantedInstances(lu, instances):
370
  """Returns list of checked and expanded instance names.
371

372
  @type lu: L{LogicalUnit}
373
  @param lu: the logical unit on whose behalf we execute
374
  @type instances: list
375
  @param instances: list of instance names or None for all instances
376
  @rtype: list
377
  @return: the list of instances, sorted
378
  @raise errors.OpPrereqError: if the instances parameter is wrong type
379
  @raise errors.OpPrereqError: if any of the passed instances is not found
380

381
  """
382
  if not isinstance(instances, list):
383
    raise errors.OpPrereqError("Invalid argument type 'instances'")
384

    
385
  if instances:
386
    wanted = []
387

    
388
    for name in instances:
389
      instance = lu.cfg.ExpandInstanceName(name)
390
      if instance is None:
391
        raise errors.OpPrereqError("No such instance name '%s'" % name)
392
      wanted.append(instance)
393

    
394
  else:
395
    wanted = lu.cfg.GetInstanceList()
396
  return utils.NiceSort(wanted)
397

    
398

    
399
def _CheckOutputFields(static, dynamic, selected):
400
  """Checks whether all selected fields are valid.
401

402
  @type static: L{utils.FieldSet}
403
  @param static: static fields set
404
  @type dynamic: L{utils.FieldSet}
405
  @param dynamic: dynamic fields set
  @type selected: list
  @param selected: the fields selected for the query
406

407
  """
408
  f = utils.FieldSet()
409
  f.Extend(static)
410
  f.Extend(dynamic)
411

    
412
  delta = f.NonMatching(selected)
413
  if delta:
414
    raise errors.OpPrereqError("Unknown output fields selected: %s"
415
                               % ",".join(delta))
416

    
417

    
418
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
419
                          memory, vcpus, nics):
420
  """Builds instance related env variables for hooks
421

422
  This builds the hook environment from individual variables.
423

424
  @type name: string
425
  @param name: the name of the instance
426
  @type primary_node: string
427
  @param primary_node: the name of the instance's primary node
428
  @type secondary_nodes: list
429
  @param secondary_nodes: list of secondary nodes as strings
430
  @type os_type: string
431
  @param os_type: the name of the instance's OS
432
  @type status: string
433
  @param status: the desired status of the instance
434
  @type memory: string
435
  @param memory: the memory size of the instance
436
  @type vcpus: string
437
  @param vcpus: the count of VCPUs the instance has
438
  @type nics: list
439
  @param nics: list of tuples (ip, bridge, mac) representing
440
      the NICs the instance has
441
  @rtype: dict
442
  @return: the hook environment for this instance
443

444
  """
445
  env = {
446
    "OP_TARGET": name,
447
    "INSTANCE_NAME": name,
448
    "INSTANCE_PRIMARY": primary_node,
449
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
450
    "INSTANCE_OS_TYPE": os_type,
451
    "INSTANCE_STATUS": status,
452
    "INSTANCE_MEMORY": memory,
453
    "INSTANCE_VCPUS": vcpus,
454
  }
455

    
456
  if nics:
457
    nic_count = len(nics)
458
    for idx, (ip, bridge, mac) in enumerate(nics):
459
      if ip is None:
460
        ip = ""
461
      env["INSTANCE_NIC%d_IP" % idx] = ip
462
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
463
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
464
  else:
465
    nic_count = 0
466

    
467
  env["INSTANCE_NIC_COUNT"] = nic_count
468

    
469
  return env
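
# Worked example (hypothetical values), showing the environment built by
# _BuildInstanceHookEnv above for a single-NIC instance:
#
#   _BuildInstanceHookEnv("inst1.example.com", "node1.example.com", [],
#                         "debian-etch", "up", 512, 1,
#                         [("192.0.2.10", "xen-br0", "aa:00:00:11:22:33")])
#   ==> {"OP_TARGET": "inst1.example.com",
#        "INSTANCE_NAME": "inst1.example.com",
#        "INSTANCE_PRIMARY": "node1.example.com",
#        "INSTANCE_SECONDARIES": "",
#        "INSTANCE_OS_TYPE": "debian-etch",
#        "INSTANCE_STATUS": "up",
#        "INSTANCE_MEMORY": 512,
#        "INSTANCE_VCPUS": 1,
#        "INSTANCE_NIC0_IP": "192.0.2.10",
#        "INSTANCE_NIC0_BRIDGE": "xen-br0",
#        "INSTANCE_NIC0_HWADDR": "aa:00:00:11:22:33",
#        "INSTANCE_NIC_COUNT": 1}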
470

    
471

    
472
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
473
  """Builds instance related env variables for hooks from an object.
474

475
  @type lu: L{LogicalUnit}
476
  @param lu: the logical unit on whose behalf we execute
477
  @type instance: L{objects.Instance}
478
  @param instance: the instance for which we should build the
479
      environment
480
  @type override: dict
481
  @param override: dictionary with key/values that will override
482
      our values
483
  @rtype: dict
484
  @return: the hook environment dictionary
485

486
  """
487
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
488
  args = {
489
    'name': instance.name,
490
    'primary_node': instance.primary_node,
491
    'secondary_nodes': instance.secondary_nodes,
492
    'os_type': instance.os,
493
    'status': instance.status,
494
    'memory': bep[constants.BE_MEMORY],
495
    'vcpus': bep[constants.BE_VCPUS],
496
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
497
  }
498
  if override:
499
    args.update(override)
500
  return _BuildInstanceHookEnv(**args)
501

    
502

    
503
def _AdjustCandidatePool(lu):
504
  """Adjust the candidate pool after node operations.
505

506
  """
507
  mod_list = lu.cfg.MaintainCandidatePool()
508
  if mod_list:
509
    lu.LogInfo("Promoted nodes to master candidate role: %s",
510
               ", ".join(mod_list))
511
    for name in mod_list:
512
      lu.context.ReaddNode(name)
513
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
514
  if mc_now > mc_max:
515
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
516
               (mc_now, mc_max))
517

    
518

    
519
def _CheckInstanceBridgesExist(lu, instance):
520
  """Check that the brigdes needed by an instance exist.
521

522
  """
523
  # check bridges existence
524
  brlist = [nic.bridge for nic in instance.nics]
525
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
526
  result.Raise()
527
  if not result.data:
528
    raise errors.OpPrereqError("One or more target bridges %s does not"
529
                               " exist on destination node '%s'" %
530
                               (brlist, instance.primary_node))
531

    
532

    
533
class LUDestroyCluster(NoHooksLU):
534
  """Logical unit for destroying the cluster.
535

536
  """
537
  _OP_REQP = []
538

    
539
  def CheckPrereq(self):
540
    """Check prerequisites.
541

542
    This checks whether the cluster is empty.
543

544
    Any errors are signalled by raising errors.OpPrereqError.
545

546
    """
547
    master = self.cfg.GetMasterNode()
548

    
549
    nodelist = self.cfg.GetNodeList()
550
    if len(nodelist) != 1 or nodelist[0] != master:
551
      raise errors.OpPrereqError("There are still %d node(s) in"
552
                                 " this cluster." % (len(nodelist) - 1))
553
    instancelist = self.cfg.GetInstanceList()
554
    if instancelist:
555
      raise errors.OpPrereqError("There are still %d instance(s) in"
556
                                 " this cluster." % len(instancelist))
557

    
558
  def Exec(self, feedback_fn):
559
    """Destroys the cluster.
560

561
    """
562
    master = self.cfg.GetMasterNode()
563
    result = self.rpc.call_node_stop_master(master, False)
564
    result.Raise()
565
    if not result.data:
566
      raise errors.OpExecError("Could not disable the master role")
567
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
568
    utils.CreateBackup(priv_key)
569
    utils.CreateBackup(pub_key)
570
    return master
571

    
572

    
573
class LUVerifyCluster(LogicalUnit):
574
  """Verifies the cluster status.
575

576
  """
577
  HPATH = "cluster-verify"
578
  HTYPE = constants.HTYPE_CLUSTER
579
  _OP_REQP = ["skip_checks"]
580
  REQ_BGL = False
581

    
582
  def ExpandNames(self):
583
    self.needed_locks = {
584
      locking.LEVEL_NODE: locking.ALL_SET,
585
      locking.LEVEL_INSTANCE: locking.ALL_SET,
586
    }
587
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
588

    
589
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
590
                  node_result, feedback_fn, master_files):
591
    """Run multiple tests against a node.
592

593
    Test list:
594

595
      - compares ganeti version
596
      - checks vg existence and size > 20G
597
      - checks config file checksum
598
      - checks ssh to other nodes
599

600
    @type nodeinfo: L{objects.Node}
601
    @param nodeinfo: the node to check
602
    @param file_list: required list of files
603
    @param local_cksum: dictionary of local files and their checksums
604
    @param node_result: the results from the node
605
    @param feedback_fn: function used to accumulate results
606
    @param master_files: list of files that only masters should have
607

608
    """
609
    node = nodeinfo.name
610

    
611
    # main result, node_result should be a non-empty dict
612
    if not node_result or not isinstance(node_result, dict):
613
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
614
      return True
615

    
616
    # compares ganeti version
617
    local_version = constants.PROTOCOL_VERSION
618
    remote_version = node_result.get('version', None)
619
    if not remote_version:
620
      feedback_fn("  - ERROR: connection to %s failed" % (node))
621
      return True
622

    
623
    if local_version != remote_version:
624
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
625
                      (local_version, node, remote_version))
626
      return True
627

    
628
    # checks vg existence and size > 20G
629

    
630
    bad = False
631
    vglist = node_result.get(constants.NV_VGLIST, None)
632
    if not vglist:
633
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
634
                      (node,))
635
      bad = True
636
    else:
637
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
638
                                            constants.MIN_VG_SIZE)
639
      if vgstatus:
640
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
641
        bad = True
642

    
643
    # checks config file checksum
644

    
645
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
646
    if not isinstance(remote_cksum, dict):
647
      bad = True
648
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
649
    else:
650
      for file_name in file_list:
651
        node_is_mc = nodeinfo.master_candidate
652
        must_have_file = file_name not in master_files
653
        if file_name not in remote_cksum:
654
          if node_is_mc or must_have_file:
655
            bad = True
656
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
657
        elif remote_cksum[file_name] != local_cksum[file_name]:
658
          if node_is_mc or must_have_file:
659
            bad = True
660
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
661
          else:
662
            # not candidate and this is not a must-have file
663
            bad = True
664
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
665
                        " '%s'" % file_name)
666
        else:
667
          # all good, except non-master/non-must have combination
668
          if not node_is_mc and not must_have_file:
669
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
670
                        " candidates" % file_name)
671

    
672
    # checks ssh to any
673

    
674
    if constants.NV_NODELIST not in node_result:
675
      bad = True
676
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
677
    else:
678
      if node_result[constants.NV_NODELIST]:
679
        bad = True
680
        for node in node_result[constants.NV_NODELIST]:
681
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
682
                          (node, node_result[constants.NV_NODELIST][node]))
683

    
684
    if constants.NV_NODENETTEST not in node_result:
685
      bad = True
686
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
687
    else:
688
      if node_result[constants.NV_NODENETTEST]:
689
        bad = True
690
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
691
        for node in nlist:
692
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
693
                          (node, node_result[constants.NV_NODENETTEST][node]))
694

    
695
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
696
    if isinstance(hyp_result, dict):
697
      for hv_name, hv_result in hyp_result.iteritems():
698
        if hv_result is not None:
699
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
700
                      (hv_name, hv_result))
701
    return bad
702

    
703
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
704
                      node_instance, feedback_fn):
705
    """Verify an instance.
706

707
    This function checks to see if the required block devices are
708
    available on the instance's node.
709

710
    """
711
    bad = False
712

    
713
    node_current = instanceconfig.primary_node
714

    
715
    node_vol_should = {}
716
    instanceconfig.MapLVsByNode(node_vol_should)
717

    
718
    for node in node_vol_should:
719
      for volume in node_vol_should[node]:
720
        if node not in node_vol_is or volume not in node_vol_is[node]:
721
          feedback_fn("  - ERROR: volume %s missing on node %s" %
722
                          (volume, node))
723
          bad = True
724

    
725
    if not instanceconfig.status == 'down':
726
      if (node_current not in node_instance or
727
          not instance in node_instance[node_current]):
728
        feedback_fn("  - ERROR: instance %s not running on node %s" %
729
                        (instance, node_current))
730
        bad = True
731

    
732
    for node in node_instance:
733
      if (not node == node_current):
734
        if instance in node_instance[node]:
735
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
736
                          (instance, node))
737
          bad = True
738

    
739
    return bad
740

    
741
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
742
    """Verify if there are any unknown volumes in the cluster.
743

744
    The .os, .swap and backup volumes are ignored. All other volumes are
745
    reported as unknown.
746

747
    """
748
    bad = False
749

    
750
    for node in node_vol_is:
751
      for volume in node_vol_is[node]:
752
        if node not in node_vol_should or volume not in node_vol_should[node]:
753
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
754
                      (volume, node))
755
          bad = True
756
    return bad
757

    
758
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
759
    """Verify the list of running instances.
760

761
    This checks what instances are running but unknown to the cluster.
762

763
    """
764
    bad = False
765
    for node in node_instance:
766
      for runninginstance in node_instance[node]:
767
        if runninginstance not in instancelist:
768
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
769
                          (runninginstance, node))
770
          bad = True
771
    return bad
772

    
773
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
774
    """Verify N+1 Memory Resilience.
775

776
    Check that if one single node dies we can still start all the instances it
777
    was primary for.
778

779
    """
780
    bad = False
781

    
782
    for node, nodeinfo in node_info.iteritems():
783
      # This code checks that every node which is now listed as secondary has
784
      # enough memory to host all instances it is supposed to, should a single
785
      # other node in the cluster fail.
786
      # FIXME: not ready for failover to an arbitrary node
787
      # FIXME: does not support file-backed instances
788
      # WARNING: we currently take into account down instances as well as up
789
      # ones, considering that even if they're down someone might want to start
790
      # them even in the event of a node failure.
791
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
792
        needed_mem = 0
793
        for instance in instances:
794
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
795
          if bep[constants.BE_AUTO_BALANCE]:
796
            needed_mem += bep[constants.BE_MEMORY]
797
        if nodeinfo['mfree'] < needed_mem:
798
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
799
                      " failovers should node %s fail" % (node, prinode))
800
          bad = True
801
    return bad
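
  # Worked example (hypothetical numbers) for _VerifyNPlusOneMemory above:
  # if node B reports 2048 MB free and is secondary for three auto-balanced
  # instances whose primary is node A, each with BE_MEMORY = 1024, then
  # needed_mem for the (A, B) pair is 3072 > 2048, and node B is reported
  # as unable to absorb the failovers should node A fail.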
802

    
803
  def CheckPrereq(self):
804
    """Check prerequisites.
805

806
    Transform the list of checks we're going to skip into a set and check that
807
    all its members are valid.
808

809
    """
810
    self.skip_set = frozenset(self.op.skip_checks)
811
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
812
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
813

    
814
  def BuildHooksEnv(self):
815
    """Build hooks env.
816

817
    Cluster-Verify hooks just run in the post phase, and their failure causes
818
    the output to be logged in the verify output and the verification to fail.
819

820
    """
821
    all_nodes = self.cfg.GetNodeList()
822
    # TODO: populate the environment with useful information for verify hooks
823
    env = {}
824
    return env, [], all_nodes
825

    
826
  def Exec(self, feedback_fn):
827
    """Verify integrity of cluster, performing various test on nodes.
828

829
    """
830
    bad = False
831
    feedback_fn("* Verifying global settings")
832
    for msg in self.cfg.VerifyConfig():
833
      feedback_fn("  - ERROR: %s" % msg)
834

    
835
    vg_name = self.cfg.GetVGName()
836
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
837
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
838
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
839
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
840
    i_non_redundant = [] # Non redundant instances
841
    i_non_a_balanced = [] # Non auto-balanced instances
842
    node_volume = {}
843
    node_instance = {}
844
    node_info = {}
845
    instance_cfg = {}
846

    
847
    # FIXME: verify OS list
848
    # do local checksums
849
    master_files = [constants.CLUSTER_CONF_FILE]
850

    
851
    file_names = ssconf.SimpleStore().GetFileList()
852
    file_names.append(constants.SSL_CERT_FILE)
853
    file_names.extend(master_files)
854

    
855
    local_checksums = utils.FingerprintFiles(file_names)
856

    
857
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
858
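    # parameters for the node_verify RPC: which files to checksum, which
    # nodes to attempt ssh/tcp connectivity to, and which volume group and
    # hypervisors to inspect; the results are consumed field by field below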
    node_verify_param = {
859
      constants.NV_FILELIST: file_names,
860
      constants.NV_NODELIST: nodelist,
861
      constants.NV_HYPERVISOR: hypervisors,
862
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
863
                                  node.secondary_ip) for node in nodeinfo],
864
      constants.NV_LVLIST: vg_name,
865
      constants.NV_INSTANCELIST: hypervisors,
866
      constants.NV_VGLIST: None,
867
      constants.NV_VERSION: None,
868
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
869
      }
870
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
871
                                           self.cfg.GetClusterName())
872

    
873
    cluster = self.cfg.GetClusterInfo()
874
    master_node = self.cfg.GetMasterNode()
875
    for node_i in nodeinfo:
876
      node = node_i.name
877
      nresult = all_nvinfo[node].data
878

    
879
      if node == master_node:
880
        ntype = "master"
881
      elif node_i.master_candidate:
882
        ntype = "master candidate"
883
      else:
884
        ntype = "regular"
885
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
886

    
887
      if all_nvinfo[node].failed or not isinstance(nresult, dict):
888
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
889
        bad = True
890
        continue
891

    
892
      result = self._VerifyNode(node_i, file_names, local_checksums,
893
                                nresult, feedback_fn, master_files)
894
      bad = bad or result
895

    
896
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
897
      if isinstance(lvdata, basestring):
898
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
899
                    (node, lvdata.encode('string_escape')))
900
        bad = True
901
        node_volume[node] = {}
902
      elif not isinstance(lvdata, dict):
903
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
904
        bad = True
905
        continue
906
      else:
907
        node_volume[node] = lvdata
908

    
909
      # node_instance
910
      idata = nresult.get(constants.NV_INSTANCELIST, None)
911
      if not isinstance(idata, list):
912
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
913
                    (node,))
914
        bad = True
915
        continue
916

    
917
      node_instance[node] = idata
918

    
919
      # node_info
920
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
921
      if not isinstance(nodeinfo, dict):
922
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
923
        bad = True
924
        continue
925

    
926
      try:
927
        node_info[node] = {
928
          "mfree": int(nodeinfo['memory_free']),
929
          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
930
          "pinst": [],
931
          "sinst": [],
932
          # dictionary holding all instances this node is secondary for,
933
          # grouped by their primary node. Each key is a cluster node, and each
934
          # value is a list of instances which have the key as primary and the
935
          # current node as secondary.  this is handy to calculate N+1 memory
936
          # availability if you can only failover from a primary to its
937
          # secondary.
938
          "sinst-by-pnode": {},
939
        }
940
      except ValueError:
941
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
942
        bad = True
943
        continue
944

    
945
    node_vol_should = {}
946

    
947
    for instance in instancelist:
948
      feedback_fn("* Verifying instance %s" % instance)
949
      inst_config = self.cfg.GetInstanceInfo(instance)
950
      result =  self._VerifyInstance(instance, inst_config, node_volume,
951
                                     node_instance, feedback_fn)
952
      bad = bad or result
953

    
954
      inst_config.MapLVsByNode(node_vol_should)
955

    
956
      instance_cfg[instance] = inst_config
957

    
958
      pnode = inst_config.primary_node
959
      if pnode in node_info:
960
        node_info[pnode]['pinst'].append(instance)
961
      else:
962
        feedback_fn("  - ERROR: instance %s, connection to primary node"
963
                    " %s failed" % (instance, pnode))
964
        bad = True
965

    
966
      # If the instance is non-redundant we cannot survive losing its primary
967
      # node, so we are not N+1 compliant. On the other hand we have no disk
968
      # templates with more than one secondary so that situation is not well
969
      # supported either.
970
      # FIXME: does not support file-backed instances
971
      if len(inst_config.secondary_nodes) == 0:
972
        i_non_redundant.append(instance)
973
      elif len(inst_config.secondary_nodes) > 1:
974
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
975
                    % instance)
976

    
977
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
978
        i_non_a_balanced.append(instance)
979

    
980
      for snode in inst_config.secondary_nodes:
981
        if snode in node_info:
982
          node_info[snode]['sinst'].append(instance)
983
          if pnode not in node_info[snode]['sinst-by-pnode']:
984
            node_info[snode]['sinst-by-pnode'][pnode] = []
985
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
986
        else:
987
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
988
                      " %s failed" % (instance, snode))
989

    
990
    feedback_fn("* Verifying orphan volumes")
991
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
992
                                       feedback_fn)
993
    bad = bad or result
994

    
995
    feedback_fn("* Verifying remaining instances")
996
    result = self._VerifyOrphanInstances(instancelist, node_instance,
997
                                         feedback_fn)
998
    bad = bad or result
999

    
1000
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1001
      feedback_fn("* Verifying N+1 Memory redundancy")
1002
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1003
      bad = bad or result
1004

    
1005
    feedback_fn("* Other Notes")
1006
    if i_non_redundant:
1007
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1008
                  % len(i_non_redundant))
1009

    
1010
    if i_non_a_balanced:
1011
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1012
                  % len(i_non_a_balanced))
1013

    
1014
    return not bad
1015

    
1016
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1017
    """Analize the post-hooks' result
1018

1019
    This method analyses the hook result, handles it, and sends some
1020
    nicely-formatted feedback back to the user.
1021

1022
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1023
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1024
    @param hooks_results: the results of the multi-node hooks rpc call
1025
    @param feedback_fn: function used to send feedback back to the caller
1026
    @param lu_result: previous Exec result
1027
    @return: the new Exec result, based on the previous result
1028
        and hook results
1029

1030
    """
1031
    # We only really run POST phase hooks, and are only interested in
1032
    # their results
1033
    if phase == constants.HOOKS_PHASE_POST:
1034
      # Used to change hooks' output to proper indentation
1035
      indent_re = re.compile('^', re.M)
1036
      feedback_fn("* Hooks Results")
1037
      if not hooks_results:
1038
        feedback_fn("  - ERROR: general communication failure")
1039
        lu_result = 1
1040
      else:
1041
        for node_name in hooks_results:
1042
          show_node_header = True
1043
          res = hooks_results[node_name]
1044
          if res.failed or res.data is False or not isinstance(res.data, list):
1045
            feedback_fn("    Communication failure in hooks execution")
1046
            lu_result = 1
1047
            continue
1048
          for script, hkr, output in res.data:
1049
            if hkr == constants.HKR_FAIL:
1050
              # The node header is only shown once, if there are
1051
              # failing hooks on that node
1052
              if show_node_header:
1053
                feedback_fn("  Node %s:" % node_name)
1054
                show_node_header = False
1055
              feedback_fn("    ERROR: Script %s failed, output:" % script)
1056
              output = indent_re.sub('      ', output)
1057
              feedback_fn("%s" % output)
1058
              lu_result = 1
1059

    
1060
      return lu_result
1061

    
1062

    
1063
class LUVerifyDisks(NoHooksLU):
1064
  """Verifies the cluster disks status.
1065

1066
  """
1067
  _OP_REQP = []
1068
  REQ_BGL = False
1069

    
1070
  def ExpandNames(self):
1071
    self.needed_locks = {
1072
      locking.LEVEL_NODE: locking.ALL_SET,
1073
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1074
    }
1075
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1076

    
1077
  def CheckPrereq(self):
1078
    """Check prerequisites.
1079

1080
    This has no prerequisites.
1081

1082
    """
1083
    pass
1084

    
1085
  def Exec(self, feedback_fn):
1086
    """Verify integrity of cluster disks.
1087

1088
    """
1089
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}
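    # the opcode result is the 4-tuple just built: nodes that could not be
    # queried, per-node LVM error messages, instances whose volumes are not
    # online, and a map from instance name to missing (node, volume) pairs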
1090

    
1091
    vg_name = self.cfg.GetVGName()
1092
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1093
    instances = [self.cfg.GetInstanceInfo(name)
1094
                 for name in self.cfg.GetInstanceList()]
1095

    
1096
    nv_dict = {}
1097
    for inst in instances:
1098
      inst_lvs = {}
1099
      if (inst.status != "up" or
1100
          inst.disk_template not in constants.DTS_NET_MIRROR):
1101
        continue
1102
      inst.MapLVsByNode(inst_lvs)
1103
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1104
      for node, vol_list in inst_lvs.iteritems():
1105
        for vol in vol_list:
1106
          nv_dict[(node, vol)] = inst
1107

    
1108
    if not nv_dict:
1109
      return result
1110

    
1111
    node_lvs = self.rpc.call_volume_list(nodes, vg_name)
1112

    
1113
    to_act = set()
1114
    for node in nodes:
1115
      # node_volume
1116
      lvs = node_lvs[node]
1117
      if lvs.failed:
1118
        self.LogWarning("Connection to node %s failed: %s" %
1119
                        (node, lvs.data))
1120
        continue
1121
      lvs = lvs.data
1122
      if isinstance(lvs, basestring):
1123
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
1124
        res_nlvm[node] = lvs
1125
      elif not isinstance(lvs, dict):
1126
        logging.warning("Connection to node %s failed or invalid data"
1127
                        " returned", node)
1128
        res_nodes.append(node)
1129
        continue
1130

    
1131
      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
1132
        inst = nv_dict.pop((node, lv_name), None)
1133
        if (not lv_online and inst is not None
1134
            and inst.name not in res_instances):
1135
          res_instances.append(inst.name)
1136

    
1137
    # any leftover items in nv_dict are missing LVs, let's arrange the
1138
    # data better
1139
    for key, inst in nv_dict.iteritems():
1140
      if inst.name not in res_missing:
1141
        res_missing[inst.name] = []
1142
      res_missing[inst.name].append(key)
1143

    
1144
    return result
1145

    
1146

    
1147
class LURenameCluster(LogicalUnit):
1148
  """Rename the cluster.
1149

1150
  """
1151
  HPATH = "cluster-rename"
1152
  HTYPE = constants.HTYPE_CLUSTER
1153
  _OP_REQP = ["name"]
1154

    
1155
  def BuildHooksEnv(self):
1156
    """Build hooks env.
1157

1158
    """
1159
    env = {
1160
      "OP_TARGET": self.cfg.GetClusterName(),
1161
      "NEW_NAME": self.op.name,
1162
      }
1163
    mn = self.cfg.GetMasterNode()
1164
    return env, [mn], [mn]
1165

    
1166
  def CheckPrereq(self):
1167
    """Verify that the passed name is a valid one.
1168

1169
    """
1170
    hostname = utils.HostInfo(self.op.name)
1171

    
1172
    new_name = hostname.name
1173
    self.ip = new_ip = hostname.ip
1174
    old_name = self.cfg.GetClusterName()
1175
    old_ip = self.cfg.GetMasterIP()
1176
    if new_name == old_name and new_ip == old_ip:
1177
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1178
                                 " cluster has changed")
1179
    if new_ip != old_ip:
1180
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1181
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1182
                                   " reachable on the network. Aborting." %
1183
                                   new_ip)
1184

    
1185
    self.op.name = new_name
1186

    
1187
  def Exec(self, feedback_fn):
1188
    """Rename the cluster.
1189

1190
    """
1191
    clustername = self.op.name
1192
    ip = self.ip
1193

    
1194
    # shutdown the master IP
1195
    master = self.cfg.GetMasterNode()
1196
    result = self.rpc.call_node_stop_master(master, False)
1197
    if result.failed or not result.data:
1198
      raise errors.OpExecError("Could not disable the master role")
1199

    
1200
    try:
1201
      cluster = self.cfg.GetClusterInfo()
1202
      cluster.cluster_name = clustername
1203
      cluster.master_ip = ip
1204
      self.cfg.Update(cluster)
1205

    
1206
      # update the known hosts file
1207
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
1208
      node_list = self.cfg.GetNodeList()
1209
      try:
1210
        node_list.remove(master)
1211
      except ValueError:
1212
        pass
1213
      result = self.rpc.call_upload_file(node_list,
1214
                                         constants.SSH_KNOWN_HOSTS_FILE)
1215
      for to_node, to_result in result.iteritems():
1216
        if to_result.failed or not to_result.data:
1217
          logging.error("Copy of file %s to node %s failed", fname, to_node)
1218

    
1219
    finally:
1220
      result = self.rpc.call_node_start_master(master, False)
1221
      if result.failed or not result.data:
1222
        self.LogWarning("Could not re-enable the master role on"
1223
                        " the master, please restart manually.")
1224

    
1225

    
1226
def _RecursiveCheckIfLVMBased(disk):
1227
  """Check if the given disk or its children are lvm-based.
1228

1229
  @type disk: L{objects.Disk}
1230
  @param disk: the disk to check
1231
  @rtype: boolean
1232
  @return: boolean indicating whether a LD_LV dev_type was found or not
1233

1234
  """
1235
  if disk.children:
1236
    for chdisk in disk.children:
1237
      if _RecursiveCheckIfLVMBased(chdisk):
1238
        return True
1239
  return disk.dev_type == constants.LD_LV
1240

    
1241

    
1242
class LUSetClusterParams(LogicalUnit):
1243
  """Change the parameters of the cluster.
1244

1245
  """
1246
  HPATH = "cluster-modify"
1247
  HTYPE = constants.HTYPE_CLUSTER
1248
  _OP_REQP = []
1249
  REQ_BGL = False
1250

    
1251
  def CheckArguments(self):
1252
    """Check parameters
1253

1254
    """
1255
    if not hasattr(self.op, "candidate_pool_size"):
1256
      self.op.candidate_pool_size = None
1257
    if self.op.candidate_pool_size is not None:
1258
      try:
1259
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1260
      except ValueError, err:
1261
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1262
                                   str(err))
1263
      if self.op.candidate_pool_size < 1:
1264
        raise errors.OpPrereqError("At least one master candidate needed")
1265

    
1266
  def ExpandNames(self):
1267
    # FIXME: in the future maybe other cluster params won't require checking on
1268
    # all nodes to be modified.
1269
    self.needed_locks = {
1270
      locking.LEVEL_NODE: locking.ALL_SET,
1271
    }
1272
    self.share_locks[locking.LEVEL_NODE] = 1
1273

    
1274
  def BuildHooksEnv(self):
1275
    """Build hooks env.
1276

1277
    """
1278
    env = {
1279
      "OP_TARGET": self.cfg.GetClusterName(),
1280
      "NEW_VG_NAME": self.op.vg_name,
1281
      }
1282
    mn = self.cfg.GetMasterNode()
1283
    return env, [mn], [mn]
1284

    
1285
  def CheckPrereq(self):
1286
    """Check prerequisites.
1287

1288
    This checks whether the given params don't conflict and
1289
    if the given volume group is valid.
1290

1291
    """
1292
    # FIXME: This only works because there is only one parameter that can be
1293
    # changed or removed.
1294
    if self.op.vg_name is not None and not self.op.vg_name:
1295
      instances = self.cfg.GetAllInstancesInfo().values()
1296
      for inst in instances:
1297
        for disk in inst.disks:
1298
          if _RecursiveCheckIfLVMBased(disk):
1299
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1300
                                       " lvm-based instances exist")
1301

    
1302
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1303

    
1304
    # if vg_name is not None, check the given volume group on all nodes
1305
    if self.op.vg_name:
1306
      vglist = self.rpc.call_vg_list(node_list)
1307
      for node in node_list:
1308
        if vglist[node].failed:
1309
          # ignoring down node
1310
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
1311
          continue
1312
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
1313
                                              self.op.vg_name,
1314
                                              constants.MIN_VG_SIZE)
1315
        if vgstatus:
1316
          raise errors.OpPrereqError("Error on node '%s': %s" %
1317
                                     (node, vgstatus))
1318

    
1319
    self.cluster = cluster = self.cfg.GetClusterInfo()
1320
    # validate beparams changes
1321
    if self.op.beparams:
1322
      utils.CheckBEParams(self.op.beparams)
1323
      self.new_beparams = cluster.FillDict(
1324
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)
1325

    
1326
    # hypervisor list/parameters
1327
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
1328
    if self.op.hvparams:
1329
      if not isinstance(self.op.hvparams, dict):
1330
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1331
      for hv_name, hv_dict in self.op.hvparams.items():
1332
        if hv_name not in self.new_hvparams:
1333
          self.new_hvparams[hv_name] = hv_dict
1334
        else:
1335
          self.new_hvparams[hv_name].update(hv_dict)
1336

    
1337
    if self.op.enabled_hypervisors is not None:
1338
      self.hv_list = self.op.enabled_hypervisors
1339
    else:
1340
      self.hv_list = cluster.enabled_hypervisors
1341

    
1342
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1343
      # either the enabled list has changed, or the parameters have, validate
1344
      for hv_name, hv_params in self.new_hvparams.items():
1345
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1346
            (self.op.enabled_hypervisors and
1347
             hv_name in self.op.enabled_hypervisors)):
1348
          # either this is a new hypervisor, or its parameters have changed
1349
          hv_class = hypervisor.GetHypervisor(hv_name)
1350
          hv_class.CheckParameterSyntax(hv_params)
1351
          _CheckHVParams(self, node_list, hv_name, hv_params)
1352

    
1353
  def Exec(self, feedback_fn):
1354
    """Change the parameters of the cluster.
1355

1356
    """
1357
    if self.op.vg_name is not None:
1358
      if self.op.vg_name != self.cfg.GetVGName():
1359
        self.cfg.SetVGName(self.op.vg_name)
1360
      else:
1361
        feedback_fn("Cluster LVM configuration already in desired"
1362
                    " state, not changing")
1363
    if self.op.hvparams:
1364
      self.cluster.hvparams = self.new_hvparams
1365
    if self.op.enabled_hypervisors is not None:
1366
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1367
    if self.op.beparams:
1368
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
1369
    if self.op.candidate_pool_size is not None:
1370
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1371

    
1372
    self.cfg.Update(self.cluster)
1373

    
1374
    # we want to update nodes after the cluster so that if any errors
1375
    # happen, we have recorded and saved the cluster info
1376
    if self.op.candidate_pool_size is not None:
1377
      _AdjustCandidatePool(self)
1378

    
1379

    
1380
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1381
  """Sleep and poll for an instance's disk to sync.
1382

1383
  """
1384
  if not instance.disks:
1385
    return True
1386

    
1387
  if not oneshot:
1388
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1389

    
1390
  node = instance.primary_node
1391

    
1392
  for dev in instance.disks:
1393
    lu.cfg.SetDiskID(dev, node)
1394

    
1395
  retries = 0
1396
  while True:
1397
    max_time = 0
1398
    done = True
1399
    cumul_degraded = False
1400
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1401
    if rstats.failed or not rstats.data:
1402
      lu.LogWarning("Can't get any data from node %s", node)
1403
      retries += 1
1404
      if retries >= 10:
1405
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1406
                                 " aborting." % node)
1407
      time.sleep(6)
1408
      continue
1409
    rstats = rstats.data
1410
    retries = 0
1411
    for i in range(len(rstats)):
1412
      mstat = rstats[i]
1413
      if mstat is None:
1414
        lu.LogWarning("Can't compute data for node %s/%s",
1415
                           node, instance.disks[i].iv_name)
1416
        continue
1417
      # we ignore the ldisk parameter
1418
      perc_done, est_time, is_degraded, _ = mstat
1419
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1420
      if perc_done is not None:
1421
        done = False
1422
        if est_time is not None:
1423
          rem_time = "%d estimated seconds remaining" % est_time
1424
          max_time = est_time
1425
        else:
1426
          rem_time = "no time estimate"
1427
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1428
                        (instance.disks[i].iv_name, perc_done, rem_time))
1429
    if done or oneshot:
1430
      break
1431

    
1432
    time.sleep(min(60, max_time))
1433

    
1434
  if done:
1435
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1436
  return not cumul_degraded
1437

    
1438

    
1439
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    if rstats.failed or not rstats.data:
      logging.warning("Node %s: disk degraded, not found or node down", node)
      result = False
    else:
      result = result and (not rstats.data[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary.

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = self.rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)


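# LUQueryNodes only locks (and RPCs) the nodes when at least one requested
# field is dynamic, i.e. has to be read live from the node; queries for
# purely static fields are answered from the configuration alone.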
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal",
    )

  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted


  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in _GetWantedNodes if the
    # list is not empty; if it is empty there is no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_locking:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output


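# LUQueryNodeVolumes lists the logical volumes on the selected nodes and maps
# each volume back to its owning instance via Instance.MapLVsByNode();
# volumes that belong to no instance are reported with '-'.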
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      if node not in volumes or volumes[node].failed or not volumes[node].data:
        continue

      node_vols = volumes[node].data[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


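# LUAddNode does the node-side setup over RPC (protocol version check, SSH
# key distribution, connectivity tests) before registering the node; the new
# node becomes a master candidate only while the current number of master
# candidates is below the configured candidate_pool_size.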
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    node_info = self.cfg.GetAllNodesInfo().values()
    mc_now, _ = self.cfg.GetMasterCandidateStats()
    master_candidate = mc_now < cp_size

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip,
                                 master_candidate=master_candidate,
                                 offline=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    if result.failed or not result.data:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        for failed in result[verifier].data['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    if constants.HT_XEN_HVM in self.cfg.GetClusterInfo().enabled_hypervisors:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      if result[node].failed or not result[node].data:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)


class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    if not hasattr(self.op, 'master_candidate'):
      raise errors.OpPrereqError("Please pass at least one modification")
    self.op.master_candidate = bool(self.op.master_candidate)

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    force = self.force = self.op.force

    if self.op.master_candidate == False:
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate")
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      node_info = self.cfg.GetAllNodesInfo().values()
      num_candidates = len([node for node in node_info
                            if node.master_candidate])
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.cfg.GetNodeInfo(self.op.node_name)

    result = []

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        if (rrc.failed or not isinstance(rrc.data, (tuple, list))
            or len(rrc.data) != 2):
          self.LogWarning("Node rpc error: %s" % rrc.error)
        elif not rrc.data[0]:
          self.LogWarning("Node failed to demote itself: %s" % rrc.data[1])

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if self.op.node_name != self.cfg.GetMasterNode():
      self.context.ReaddNode(node)

    return result


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": cluster.hvparams,
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      }

    return result


class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


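# A minimal sketch of calling _AssembleInstanceDisks, mirroring
# LUActivateInstanceDisks.Exec above:
#   disks_ok, dev_info = _AssembleInstanceDisks(self, instance)
#   if not disks_ok:
#     raise errors.OpExecError("Cannot activate block devices")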
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: a tuple of (disks_ok, device_info), where device_info is a list
      of (host, instance_visible_name, node_visible_name) entries
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if result.failed or not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1)",
                           inst_disk.iv_name, node)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if result.failed or not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2)",
                           inst_disk.iv_name, node)
        disks_ok = False
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                      [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  if ins_l.failed or not isinstance(ins_l.data, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)


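# _ShutdownInstanceDisks walks each disk's node tree and shuts the block
# device down on every node; with ignore_primary=True a failure on the
# primary node does not turn the overall result to False.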
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is false, errors on the primary node are not
  ignored.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      if result.failed or not result.data:
        logging.error("Could not shutdown block device %s on node %s",
                      disk.iv_name, node)
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result


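# Illustrative call of _CheckNodeFreeMemory, mirroring
# LUStartupInstance.CheckPrereq below:
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)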
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor: C{str}
  @param hypervisor: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor)
  nodeinfo[node].Raise()
  free_mem = nodeinfo[node].data.get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                             " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                             " needed %s MiB, available %s MiB" %
                             (node, reason, requested, free_mem))


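# LUStartupInstance marks the instance as up in the configuration, assembles
# its disks via _StartInstanceDisks and then asks the primary node to start
# it; if the start RPC fails, the disks are shut down again.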
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance, extra_args)
    if result.failed or not result.data:
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance")


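# LURebootInstance handles soft and hard reboots with a single
# instance_reboot RPC, while a full reboot is implemented as a shutdown,
# disk deactivation/reactivation and a fresh start on the primary node.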
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type, extra_args)
      if result.failed or not result.data:
        raise errors.OpExecError("Could not reboot instance")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance)
      if result.failed or not result.data:
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, extra_args)
      if result.failed or not result.data:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)


class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance)
    if result.failed or not result.data:
      self.proc.LogWarning("Could not shutdown instance")

    _ShutdownInstanceDisks(self, instance)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info.failed or remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(self, inst)


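# LURenameInstance renames the instance in the configuration first, then (for
# file-based disks) renames the storage directory and finally runs the OS
# rename script; failures after the configuration rename are reported with an
# explicit note that the instance has already been renamed in Ganeti.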
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.cfg.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      if result.failed or not result.data:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    if result.failed or not result.data:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name


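# LUQueryInstances follows the same pattern as LUQueryNodes: static fields
# are answered from the configuration, while the dynamic fields (oper_state,
# oper_ram, status) require locking the instances and querying their primary
# nodes.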
class LUQueryInstances(NoHooksLU):
2919
  """Logical unit for querying instances.
2920

2921
  """
2922
  _OP_REQP = ["output_fields", "names"]
2923
  REQ_BGL = False
2924
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
2925
                                    "admin_state", "admin_ram",
2926
                                    "disk_template", "ip", "mac", "bridge",
2927
                                    "sda_size", "sdb_size", "vcpus", "tags",
2928
                                    "network_port", "beparams",
2929
                                    "(disk).(size)/([0-9]+)",
2930
                                    "(disk).(sizes)",
2931
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
2932
                                    "(nic).(macs|ips|bridges)",
2933
                                    "(disk|nic).(count)",
2934
                                    "serial_no", "hypervisor", "hvparams",] +
2935
                                  ["hv/%s" % name
2936
                                   for name in constants.HVS_PARAMETERS] +
2937
                                  ["be/%s" % name
2938
                                   for name in constants.BES_PARAMETERS])
2939
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
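
  # Illustrative note: the parenthesised static fields above act as patterns,
  # so a query may request indexed values such as "disk.size/0" or "nic.mac/1"
  # as well as aggregate forms like "disk.sizes", "nic.macs" or "nic.count";
  # utils.FieldSet appears to match requested field names against them.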


  def ExpandNames(self):
2943
    _CheckOutputFields(static=self._FIELDS_STATIC,
2944
                       dynamic=self._FIELDS_DYNAMIC,
2945
                       selected=self.op.output_fields)
2946

    
2947
    self.needed_locks = {}
2948
    self.share_locks[locking.LEVEL_INSTANCE] = 1
2949
    self.share_locks[locking.LEVEL_NODE] = 1
2950

    
2951
    if self.op.names:
2952
      self.wanted = _GetWantedInstances(self, self.op.names)
2953
    else:
2954
      self.wanted = locking.ALL_SET
2955

    
2956
    self.do_locking = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2957
    if self.do_locking:
2958
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
2959
      self.needed_locks[locking.LEVEL_NODE] = []
2960
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2961

    
2962
  def DeclareLocks(self, level):
2963
    if level == locking.LEVEL_NODE and self.do_locking:
2964
      self._LockInstancesNodes()
2965

    
2966
  def CheckPrereq(self):
2967
    """Check prerequisites.
2968

2969
    """
2970
    pass
2971

    
2972
  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.
2974

2975
    """
2976
    all_info = self.cfg.GetAllInstancesInfo()
2977
    if self.do_locking:
2978
      instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2979
    elif self.wanted != locking.ALL_SET:
2980
      instance_names = self.wanted
2981
      missing = set(instance_names).difference(all_info.keys())
2982
      if missing:
2983
        raise errors.OpExecError(
2984
          "Some instances were removed before retrieving their data: %s"
2985
          % missing)
2986
    else:
2987
      instance_names = all_info.keys()
2988

    
2989
    instance_names = utils.NiceSort(instance_names)
2990
    instance_list = [all_info[iname] for iname in instance_names]
2991

    
2992
    # begin data gathering
2993

    
2994
    nodes = frozenset([inst.primary_node for inst in instance_list])
2995
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
2996

    
2997
    bad_nodes = []
2998
    off_nodes = []
2999
    if self.do_locking:
3000
      live_data = {}
3001
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
3002
      for name in nodes:
3003
        result = node_data[name]
3004
        if result.offline:
3005
          # offline nodes will be in both lists
3006
          off_nodes.append(name)
3007
        if result.failed:
3008
          bad_nodes.append(name)
3009
        else:
3010
          if result.data:
3011
            live_data.update(result.data)
3012
            # else no instance is alive
3013
    else:
3014
      live_data = dict([(name, {}) for name in instance_names])
3015

    
3016
    # end data gathering
3017

    
3018
    HVPREFIX = "hv/"
3019
    BEPREFIX = "be/"
3020
    output = []
3021
    for instance in instance_list:
3022
      iout = []
3023
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
3024
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
3025
      for field in self.op.output_fields:
3026
        st_match = self._FIELDS_STATIC.Matches(field)
3027
        if field == "name":
3028
          val = instance.name
3029
        elif field == "os":
3030
          val = instance.os
3031
        elif field == "pnode":
3032
          val = instance.primary_node
3033
        elif field == "snodes":
3034
          val = list(instance.secondary_nodes)
3035
        elif field == "admin_state":
3036
          val = (instance.status != "down")
3037
        elif field == "oper_state":
3038
          if instance.primary_node in bad_nodes:
3039
            val = None
3040
          else:
3041
            val = bool(live_data.get(instance.name))
3042
        elif field == "status":
3043
          if instance.primary_node in off_nodes:
3044
            val = "ERROR_nodeoffline"
3045
          elif instance.primary_node in bad_nodes:
3046
            val = "ERROR_nodedown"
3047
          else:
3048
            running = bool(live_data.get(instance.name))
3049
            if running:
3050
              if instance.status != "down":
3051
                val = "running"
3052
              else:
3053
                val = "ERROR_up"
3054
            else:
3055
              if instance.status != "down":
3056
                val = "ERROR_down"
3057
              else:
3058
                val = "ADMIN_down"
3059
        elif field == "oper_ram":
3060
          if instance.primary_node in bad_nodes:
3061
            val = None
3062
          elif instance.name in live_data:
3063
            val = live_data[instance.name].get("memory", "?")
3064
          else:
3065
            val = "-"
3066
        elif field == "disk_template":
3067
          val = instance.disk_template
3068
        elif field == "ip":
3069
          val = instance.nics[0].ip
3070
        elif field == "bridge":
3071
          val = instance.nics[0].bridge
3072
        elif field == "mac":
3073
          val = instance.nics[0].mac
3074
        elif field == "sda_size" or field == "sdb_size":
3075
          idx = ord(field[2]) - ord('a')
3076
          try:
3077
            val = instance.FindDisk(idx).size
3078
          except errors.OpPrereqError:
3079
            val = None
3080
        elif field == "tags":
3081
          val = list(instance.GetTags())
3082
        elif field == "serial_no":
3083
          val = instance.serial_no
3084
        elif field == "network_port":
3085
          val = instance.network_port
3086
        elif field == "hypervisor":
3087
          val = instance.hypervisor
3088
        elif field == "hvparams":
3089
          val = i_hv
3090
        elif (field.startswith(HVPREFIX) and
3091
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
3092
          val = i_hv.get(field[len(HVPREFIX):], None)
3093
        elif field == "beparams":
3094
          val = i_be
3095
        elif (field.startswith(BEPREFIX) and
3096
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
3097
          val = i_be.get(field[len(BEPREFIX):], None)
3098
        elif st_match and st_match.groups():
3099
          # matches a variable list
3100
          st_groups = st_match.groups()
3101
          if st_groups and st_groups[0] == "disk":
3102
            if st_groups[1] == "count":
3103
              val = len(instance.disks)
3104
            elif st_groups[1] == "sizes":
3105
              val = [disk.size for disk in instance.disks]
3106
            elif st_groups[1] == "size":
3107
              try:
3108
                val = instance.FindDisk(st_groups[2]).size
3109
              except errors.OpPrereqError:
3110
                val = None
3111
            else:
3112
              assert False, "Unhandled disk parameter"
3113
          elif st_groups[0] == "nic":
3114
            if st_groups[1] == "count":
3115
              val = len(instance.nics)
3116
            elif st_groups[1] == "macs":
3117
              val = [nic.mac for nic in instance.nics]
3118
            elif st_groups[1] == "ips":
3119
              val = [nic.ip for nic in instance.nics]
3120
            elif st_groups[1] == "bridges":
3121
              val = [nic.bridge for nic in instance.nics]
3122
            else:
3123
              # index-based item
3124
              nic_idx = int(st_groups[2])
3125
              if nic_idx >= len(instance.nics):
3126
                val = None
3127
              else:
3128
                if st_groups[1] == "mac":
3129
                  val = instance.nics[nic_idx].mac
3130
                elif st_groups[1] == "ip":
3131
                  val = instance.nics[nic_idx].ip
3132
                elif st_groups[1] == "bridge":
3133
                  val = instance.nics[nic_idx].bridge
3134
                else:
3135
                  assert False, "Unhandled NIC parameter"
3136
          else:
3137
            assert False, "Unhandled variable parameter"
3138
        else:
3139
          raise errors.ParameterError(field)
3140
        iout.append(val)
3141
      output.append(iout)
3142

    
3143
    return output
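
    # Descriptive note: the result is one row per (name-sorted) instance, each
    # row holding the values of self.op.output_fields in the requested order.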


class LUFailoverInstance(LogicalUnit):
3147
  """Failover an instance.
3148

3149
  """
3150
  HPATH = "instance-failover"
3151
  HTYPE = constants.HTYPE_INSTANCE
3152
  _OP_REQP = ["instance_name", "ignore_consistency"]
3153
  REQ_BGL = False
3154

    
3155
  def ExpandNames(self):
3156
    self._ExpandAndLockInstance()
3157
    self.needed_locks[locking.LEVEL_NODE] = []
3158
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3159

    
3160
  def DeclareLocks(self, level):
3161
    if level == locking.LEVEL_NODE:
3162
      self._LockInstancesNodes()
3163

    
3164
  def BuildHooksEnv(self):
3165
    """Build hooks env.
3166

3167
    This runs on master, primary and secondary nodes of the instance.
3168

3169
    """
3170
    env = {
3171
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
3172
      }
3173
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3174
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3175
    return env, nl, nl
3176

    
3177
  def CheckPrereq(self):
3178
    """Check prerequisites.
3179

3180
    This checks that the instance is in the cluster.
3181

3182
    """
3183
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3184
    assert self.instance is not None, \
3185
      "Cannot retrieve locked instance %s" % self.op.instance_name
3186

    
3187
    bep = self.cfg.GetClusterInfo().FillBE(instance)
3188
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3189
      raise errors.OpPrereqError("Instance's disk layout is not"
3190
                                 " network mirrored, cannot failover.")
3191

    
3192
    secondary_nodes = instance.secondary_nodes
3193
    if not secondary_nodes:
3194
      raise errors.ProgrammerError("no secondary node but using "
3195
                                   "a mirrored disk template")
3196

    
3197
    target_node = secondary_nodes[0]
3198
    # check memory requirements on the secondary node
3199
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
3200
                         instance.name, bep[constants.BE_MEMORY],
3201
                         instance.hypervisor)
3202

    
3203
    # check bridge existence
3204
    brlist = [nic.bridge for nic in instance.nics]
3205
    result = self.rpc.call_bridges_exist(target_node, brlist)
3206
    result.Raise()
3207
    if not result.data:
3208
      raise errors.OpPrereqError("One or more target bridges %s do not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))
3211

    
3212
  def Exec(self, feedback_fn):
3213
    """Failover an instance.
3214

3215
    The failover is done by shutting it down on its present node and
3216
    starting it on the secondary.
3217

3218
    """
3219
    instance = self.instance
3220

    
3221
    source_node = instance.primary_node
3222
    target_node = instance.secondary_nodes[0]
3223

    
3224
    feedback_fn("* checking disk consistency between source and target")
3225
    for dev in instance.disks:
3226
      # for drbd, these are drbd over lvm
3227
      if not _CheckDiskConsistency(self, dev, target_node, False):
3228
        if instance.status == "up" and not self.op.ignore_consistency:
3229
          raise errors.OpExecError("Disk %s is degraded on target node,"
3230
                                   " aborting failover." % dev.iv_name)
3231

    
3232
    feedback_fn("* shutting down instance on source node")
3233
    logging.info("Shutting down instance %s on node %s",
3234
                 instance.name, source_node)
3235

    
3236
    result = self.rpc.call_instance_shutdown(source_node, instance)
3237
    if result.failed or not result.data:
3238
      if self.op.ignore_consistency:
3239
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
3240
                             " Proceeding"
3241
                             " anyway. Please make sure node %s is down",
3242
                             instance.name, source_node, source_node)
3243
      else:
3244
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
3245
                                 (instance.name, source_node))
3246

    
3247
    feedback_fn("* deactivating the instance's disks on source node")
3248
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
3249
      raise errors.OpExecError("Can't shut down the instance's disks.")
3250

    
3251
    instance.primary_node = target_node
3252
    # distribute new instance config to the other nodes
3253
    self.cfg.Update(instance)
3254

    
3255
    # Only start the instance if it's marked as up
3256
    if instance.status == "up":
3257
      feedback_fn("* activating the instance's disks on target node")
3258
      logging.info("Starting instance %s on node %s",
3259
                   instance.name, target_node)
3260

    
3261
      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
3262
                                               ignore_secondaries=True)
3263
      if not disks_ok:
3264
        _ShutdownInstanceDisks(self, instance)
3265
        raise errors.OpExecError("Can't activate the instance's disks")
3266

    
3267
      feedback_fn("* starting the instance on the target node")
3268
      result = self.rpc.call_instance_start(target_node, instance, None)
3269
      if result.failed or not result.data:
3270
        _ShutdownInstanceDisks(self, instance)
3271
        raise errors.OpExecError("Could not start instance %s on node %s." %
3272
                                 (instance.name, target_node))
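
    # Descriptive note: when the instance is administratively down, the
    # failover ends with the configuration update above; its disks are not
    # re-activated and the instance is not started on the new primary node.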


def _CreateBlockDevOnPrimary(lu, node, instance, device, info):
3276
  """Create a tree of block devices on the primary node.
3277

3278
  This always creates all devices.
3279

3280
  """
3281
  if device.children:
3282
    for child in device.children:
3283
      if not _CreateBlockDevOnPrimary(lu, node, instance, child, info):
3284
        return False
3285

    
3286
  lu.cfg.SetDiskID(device, node)
3287
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3288
                                       instance.name, True, info)
3289
  if new_id.failed or not new_id.data:
3290
    return False
3291
  if device.physical_id is None:
3292
    device.physical_id = new_id
3293
  return True


def _CreateBlockDevOnSecondary(lu, node, instance, device, force, info):
3297
  """Create a tree of block devices on a secondary node.
3298

3299
  If this device type has to be created on secondaries, create it and
3300
  all its children.
3301

3302
  If not, just recurse to children keeping the same 'force' value.
3303

3304
  """
3305
  if device.CreateOnSecondary():
3306
    force = True
3307
  if device.children:
3308
    for child in device.children:
3309
      if not _CreateBlockDevOnSecondary(lu, node, instance,
3310
                                        child, force, info):
3311
        return False
3312

    
3313
  if not force:
3314
    return True
3315
  lu.cfg.SetDiskID(device, node)
3316
  new_id = lu.rpc.call_blockdev_create(node, device, device.size,
3317
                                       instance.name, False, info)
3318
  if new_id.failed or not new_id.data:
3319
    return False
3320
  if device.physical_id is None:
3321
    device.physical_id = new_id
3322
  return True
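
# Note on the two helpers above: both recurse into device.children before
# creating the device itself, so for a DRBD8 disk the backing data and
# metadata LVs exist before the DRBD device is created on top of them.  On a
# secondary node nothing is created until a device reporting
# CreateOnSecondary() is reached; from that point on the whole remaining
# subtree is created ('force' stays True for its children).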


def _GenerateUniqueNames(lu, exts):
  """Generate a suitable set of LV names.

  This will generate one unique logical volume name for each of the
  given suffixes.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID()
    results.append("%s%s" % (new_id, val))
  return results
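
# Example (hypothetical values, for illustration only): called with
# exts == [".disk0_data", ".disk0_meta"] this returns two names, each carrying
# a freshly generated unique ID, e.g. ["<uuid1>.disk0_data",
# "<uuid2>.disk0_meta"].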


def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
3339
                         p_minor, s_minor):
3340
  """Generate a drbd8 device complete with its children.
3341

3342
  """
3343
  port = lu.cfg.AllocatePort()
3344
  vgname = lu.cfg.GetVGName()
3345
  shared_secret = lu.cfg.GenerateDRBDSecret()
3346
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
3347
                          logical_id=(vgname, names[0]))
3348
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
3349
                          logical_id=(vgname, names[1]))
3350
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
3351
                          logical_id=(primary, secondary, port,
3352
                                      p_minor, s_minor,
3353
                                      shared_secret),
3354
                          children=[dev_data, dev_meta],
3355
                          iv_name=iv_name)
3356
  return drbd_dev
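
# Shape of the result (descriptive sketch): an LD_DRBD8 disk of the requested
# size whose two LD_LV children are names[0] (the data volume of 'size' MB)
# and names[1] (the 128 MB DRBD metadata volume), and whose logical_id ties
# together the two nodes, the allocated port, both minors and the generated
# shared secret.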


def _GenerateDiskTemplate(lu, template_name,
3360
                          instance_name, primary_node,
3361
                          secondary_nodes, disk_info,
3362
                          file_storage_dir, file_driver,
3363
                          base_index):
3364
  """Generate the entire disk layout for a given template type.
3365

3366
  """
3367
  #TODO: compute space requirements
3368

    
3369
  vgname = lu.cfg.GetVGName()
3370
  disk_count = len(disk_info)
3371
  disks = []
3372
  if template_name == constants.DT_DISKLESS:
3373
    pass
3374
  elif template_name == constants.DT_PLAIN:
3375
    if len(secondary_nodes) != 0:
3376
      raise errors.ProgrammerError("Wrong template configuration")
3377

    
3378
    names = _GenerateUniqueNames(lu, [".disk%d" % i
3379
                                      for i in range(disk_count)])
3380
    for idx, disk in enumerate(disk_info):
3381
      disk_index = idx + base_index
3382
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
3383
                              logical_id=(vgname, names[idx]),
3384
                              iv_name="disk/%d" % disk_index)
3385
      disks.append(disk_dev)
3386
  elif template_name == constants.DT_DRBD8:
3387
    if len(secondary_nodes) != 1:
3388
      raise errors.ProgrammerError("Wrong template configuration")
3389
    remote_node = secondary_nodes[0]
3390
    minors = lu.cfg.AllocateDRBDMinor(
3391
      [primary_node, remote_node] * len(disk_info), instance_name)
3392

    
3393
    names = _GenerateUniqueNames(lu,
3394
                                 [".disk%d_%s" % (i, s)
3395
                                  for i in range(disk_count)
3396
                                  for s in ("data", "meta")
3397
                                  ])
3398
    for idx, disk in enumerate(disk_info):
3399
      disk_index = idx + base_index
3400
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
3401
                                      disk["size"], names[idx*2:idx*2+2],
3402
                                      "disk/%d" % disk_index,
3403
                                      minors[idx*2], minors[idx*2+1])
3404
      disks.append(disk_dev)
3405
  elif template_name == constants.DT_FILE:
3406
    if len(secondary_nodes) != 0:
3407
      raise errors.ProgrammerError("Wrong template configuration")
3408

    
3409
    for idx, disk in enumerate(disk_info):
3410
      disk_index = idx + base_index
3411
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
3412
                              iv_name="disk/%d" % disk_index,
3413
                              logical_id=(file_driver,
3414
                                          "%s/disk%d" % (file_storage_dir,
3415
                                                         idx)))
3416
      disks.append(disk_dev)
3417
  else:
3418
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
3419
  return disks
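
# Example (illustrative values): for constants.DT_PLAIN with
# disk_info == [{"size": 1024}] and base_index == 0 the result is a single
# LD_LV disk of 1024 MB in the cluster volume group, named "<uuid>.disk0" and
# carrying iv_name "disk/0"; the plain and file templates accept no secondary
# nodes.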


def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name
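
# Example: for an instance named "instance1.example.com" (hypothetical name)
# this returns "originstname+instance1.example.com".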


def _CreateDisks(lu, instance):
3430
  """Create all disks for an instance.
3431

3432
  This abstracts away some work from AddInstance.
3433

3434
  @type lu: L{LogicalUnit}
3435
  @param lu: the logical unit on whose behalf we execute
3436
  @type instance: L{objects.Instance}
3437
  @param instance: the instance whose disks we should create
3438
  @rtype: boolean
3439
  @return: the success of the creation
3440

3441
  """
3442
  info = _GetInstanceInfoText(instance)
3443

    
3444
  if instance.disk_template == constants.DT_FILE:
3445
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3446
    result = lu.rpc.call_file_storage_dir_create(instance.primary_node,
3447
                                                 file_storage_dir)
3448

    
3449
    if result.failed or not result.data:
3450
      logging.error("Could not connect to node '%s'", instance.primary_node)
3451
      return False
3452

    
3453
    if not result.data[0]:
3454
      logging.error("Failed to create directory '%s'", file_storage_dir)
3455
      return False
3456

    
3457
  # Note: this needs to be kept in sync with adding of disks in
3458
  # LUSetInstanceParams
3459
  for device in instance.disks:
3460
    logging.info("Creating volume %s for instance %s",
3461
                 device.iv_name, instance.name)
3462
    #HARDCODE
3463
    for secondary_node in instance.secondary_nodes:
3464
      if not _CreateBlockDevOnSecondary(lu, secondary_node, instance,
3465
                                        device, False, info):
3466
        logging.error("Failed to create volume %s (%s) on secondary node %s!",
3467
                      device.iv_name, device, secondary_node)
3468
        return False
3469
    #HARDCODE
3470
    if not _CreateBlockDevOnPrimary(lu, instance.primary_node,
3471
                                    instance, device, info):
3472
      logging.error("Failed to create volume %s on primary!", device.iv_name)
3473
      return False
3474

    
3475
  return True
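
# Typical usage (see LUCreateInstance.Exec below): when _CreateDisks() returns
# False the caller rolls back with _RemoveDisks() and releases any temporarily
# reserved DRBD minors before aborting the operation.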


def _RemoveDisks(lu, instance):
3479
  """Remove all disks for an instance.
3480

3481
  This abstracts away some work from `AddInstance()` and
3482
  `RemoveInstance()`. Note that in case some of the devices couldn't
3483
  be removed, the removal will continue with the other ones (compare
3484
  with `_CreateDisks()`).
3485

3486
  @type lu: L{LogicalUnit}
3487
  @param lu: the logical unit on whose behalf we execute
3488
  @type instance: L{objects.Instance}
3489
  @param instance: the instance whose disks we should remove
3490
  @rtype: boolean
3491
  @return: the success of the removal
3492

3493
  """
3494
  logging.info("Removing block devices for instance %s", instance.name)
3495

    
3496
  result = True
3497
  for device in instance.disks:
3498
    for node, disk in device.ComputeNodeTree(instance.primary_node):
3499
      lu.cfg.SetDiskID(disk, node)
3500
      result = lu.rpc.call_blockdev_remove(node, disk)
3501
      if result.failed or not result.data:
3502
        lu.proc.LogWarning("Could not remove block device %s on node %s,"
3503
                           " continuing anyway", device.iv_name, node)
3504
        result = False
3505

    
3506
  if instance.disk_template == constants.DT_FILE:
3507
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
3508
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
3509
                                                 file_storage_dir)
3510
    if result.failed or not result.data:
3511
      logging.error("Could not remove directory '%s'", file_storage_dir)
3512
      result = False
3513

    
3514
  return result


def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group.

  """
  # Required free disk space as a function of the disk template and disk sizes
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]
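
# Worked example (illustrative sizes): for constants.DT_DRBD8 with two disks
# of 1024 MB and 2048 MB the requirement is (1024 + 128) + (2048 + 128), i.e.
# 3328 MB per node, the extra 128 MB per disk being the DRBD metadata volume;
# DT_DISKLESS and DT_FILE have no volume group requirement (None).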


def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                  hvname,
                                                  hvparams)
  for node in nodenames:
    info = hvinfo[node]
    info.Raise()
    if not info.data or not isinstance(info.data, (tuple, list)):
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s' (%s)" % (node, info.data))
    if not info.data[0]:
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
                                 " %s" % info.data[1])


class LUCreateInstance(LogicalUnit):
3569
  """Create an instance.
3570

3571
  """
3572
  HPATH = "instance-add"
3573
  HTYPE = constants.HTYPE_INSTANCE
3574
  _OP_REQP = ["instance_name", "disks", "disk_template",
3575
              "mode", "start",
3576
              "wait_for_sync", "ip_check", "nics",
3577
              "hvparams", "beparams"]
3578
  REQ_BGL = False
3579

    
3580
  def _ExpandNode(self, node):
3581
    """Expands and checks one node name.
3582

3583
    """
3584
    node_full = self.cfg.ExpandNodeName(node)
3585
    if node_full is None:
3586
      raise errors.OpPrereqError("Unknown node %s" % node)
3587
    return node_full
3588

    
3589
  def ExpandNames(self):
3590
    """ExpandNames for CreateInstance.
3591

3592
    Figure out the right locks for instance creation.
3593

3594
    """
3595
    self.needed_locks = {}
3596

    
3597
    # set optional parameters to None if they don't exist
3598
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
3599
      if not hasattr(self.op, attr):
3600
        setattr(self.op, attr, None)
3601

    
3602
    # cheap checks, mostly valid constants given
3603

    
3604
    # verify creation mode
3605
    if self.op.mode not in (constants.INSTANCE_CREATE,
3606
                            constants.INSTANCE_IMPORT):
3607
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
3608
                                 self.op.mode)
3609

    
3610
    # disk template and mirror node verification
3611
    if self.op.disk_template not in constants.DISK_TEMPLATES:
3612
      raise errors.OpPrereqError("Invalid disk template name")
3613

    
3614
    if self.op.hypervisor is None:
3615
      self.op.hypervisor = self.cfg.GetHypervisorType()
3616

    
3617
    cluster = self.cfg.GetClusterInfo()
3618
    enabled_hvs = cluster.enabled_hypervisors
3619
    if self.op.hypervisor not in enabled_hvs:
3620
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
3621
                                 " cluster (%s)" % (self.op.hypervisor,
3622
                                  ",".join(enabled_hvs)))
3623

    
3624
    # check hypervisor parameter syntax (locally)
3625

    
3626
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
3627
                                  self.op.hvparams)
3628
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
3629
    hv_type.CheckParameterSyntax(filled_hvp)
3630

    
3631
    # fill and remember the beparams dict
3632
    utils.CheckBEParams(self.op.beparams)
3633
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
3634
                                    self.op.beparams)
3635

    
3636
    #### instance parameters check
3637

    
3638
    # instance name verification
3639
    hostname1 = utils.HostInfo(self.op.instance_name)
3640
    self.op.instance_name = instance_name = hostname1.name
3641

    
3642
    # this is just a preventive check, but someone might still add this
3643
    # instance in the meantime, and creation will fail at lock-add time
3644
    if instance_name in self.cfg.GetInstanceList():
3645
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3646
                                 instance_name)
3647

    
3648
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
3649

    
3650
    # NIC buildup
3651
    self.nics = []
3652
    for nic in self.op.nics:
3653
      # ip validity checks
3654
      ip = nic.get("ip", None)
3655
      if ip is None or ip.lower() == "none":
3656
        nic_ip = None
3657
      elif ip.lower() == constants.VALUE_AUTO:
3658
        nic_ip = hostname1.ip
3659
      else:
3660
        if not utils.IsValidIP(ip):
3661
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
3662
                                     " like a valid IP" % ip)
3663
        nic_ip = ip
3664

    
3665
      # MAC address verification
3666
      mac = nic.get("mac", constants.VALUE_AUTO)
3667
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
3668
        if not utils.IsValidMac(mac.lower()):
3669
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
3670
                                     mac)
3671
      # bridge verification
3672
      bridge = nic.get("bridge", self.cfg.GetDefBridge())
3673
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))
3674

    
3675
    # disk checks/pre-build
3676
    self.disks = []
3677
    for disk in self.op.disks:
3678
      mode = disk.get("mode", constants.DISK_RDWR)
3679
      if mode not in constants.DISK_ACCESS_SET:
3680
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
3681
                                   mode)
3682
      size = disk.get("size", None)
3683
      if size is None:
3684
        raise errors.OpPrereqError("Missing disk size")
3685
      try:
3686
        size = int(size)
3687
      except ValueError:
3688
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
3689
      self.disks.append({"size": size, "mode": mode})
3690

    
3691
    # used in CheckPrereq for ip ping check
3692
    self.check_ip = hostname1.ip
3693

    
3694
    # file storage checks
3695
    if (self.op.file_driver and
3696
        not self.op.file_driver in constants.FILE_DRIVER):
3697
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
3698
                                 self.op.file_driver)
3699

    
3700
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
3701
      raise errors.OpPrereqError("File storage directory path not absolute")
3702

    
3703
    ### Node/iallocator related checks
3704
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
3705
      raise errors.OpPrereqError("One and only one of iallocator and primary"
3706
                                 " node must be given")
3707

    
3708
    if self.op.iallocator:
3709
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3710
    else:
3711
      self.op.pnode = self._ExpandNode(self.op.pnode)
3712
      nodelist = [self.op.pnode]
3713
      if self.op.snode is not None:
3714
        self.op.snode = self._ExpandNode(self.op.snode)
3715
        nodelist.append(self.op.snode)
3716
      self.needed_locks[locking.LEVEL_NODE] = nodelist
3717

    
3718
    # in case of import lock the source node too
3719
    if self.op.mode == constants.INSTANCE_IMPORT:
3720
      src_node = getattr(self.op, "src_node", None)
3721
      src_path = getattr(self.op, "src_path", None)
3722

    
3723
      if src_path is None:
3724
        self.op.src_path = src_path = self.op.instance_name
3725

    
3726
      if src_node is None:
3727
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3728
        self.op.src_node = None
3729
        if os.path.isabs(src_path):
3730
          raise errors.OpPrereqError("Importing an instance from an absolute"
3731
                                     " path requires a source node option.")
3732
      else:
3733
        self.op.src_node = src_node = self._ExpandNode(src_node)
3734
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
3735
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
3736
        if not os.path.isabs(src_path):
3737
          self.op.src_path = src_path = \
3738
            os.path.join(constants.EXPORT_DIR, src_path)
3739

    
3740
    else: # INSTANCE_CREATE
3741
      if getattr(self.op, "os_type", None) is None:
3742
        raise errors.OpPrereqError("No guest OS specified")
3743

    
3744
  def _RunAllocator(self):
3745
    """Run the allocator based on input opcode.
3746

3747
    """
3748
    nics = [n.ToDict() for n in self.nics]
3749
    ial = IAllocator(self,
3750
                     mode=constants.IALLOCATOR_MODE_ALLOC,
3751
                     name=self.op.instance_name,
3752
                     disk_template=self.op.disk_template,
3753
                     tags=[],
3754
                     os=self.op.os_type,
3755
                     vcpus=self.be_full[constants.BE_VCPUS],
3756
                     mem_size=self.be_full[constants.BE_MEMORY],
3757
                     disks=self.disks,
3758
                     nics=nics,
3759
                     hypervisor=self.op.hypervisor,
3760
                     )
3761

    
3762
    ial.Run(self.op.iallocator)
3763

    
3764
    if not ial.success:
3765
      raise errors.OpPrereqError("Can't compute nodes using"
3766
                                 " iallocator '%s': %s" % (self.op.iallocator,
3767
                                                           ial.info))
3768
    if len(ial.nodes) != ial.required_nodes:
3769
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
3770
                                 " of nodes (%s), required %s" %
3771
                                 (self.op.iallocator, len(ial.nodes),
3772
                                  ial.required_nodes))
3773
    self.op.pnode = ial.nodes[0]
3774
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
3775
                 self.op.instance_name, self.op.iallocator,
3776
                 ", ".join(ial.nodes))
3777
    if ial.required_nodes == 2:
3778
      self.op.snode = ial.nodes[1]
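
    # Descriptive note: at this point the opcode has been updated in place;
    # self.op.pnode holds the allocator's first choice and, for templates
    # needing two nodes, self.op.snode holds the chosen secondary.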
  def BuildHooksEnv(self):
3781
    """Build hooks env.
3782

3783
    This runs on master, primary and secondary nodes of the instance.
3784

3785
    """
3786
    env = {
3787
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
3788
      "INSTANCE_DISK_SIZE": ",".join(str(d["size"]) for d in self.disks),
3789
      "INSTANCE_ADD_MODE": self.op.mode,
3790
      }
3791
    if self.op.mode == constants.INSTANCE_IMPORT:
3792
      env["INSTANCE_SRC_NODE"] = self.op.src_node
3793
      env["INSTANCE_SRC_PATH"] = self.op.src_path
3794
      env["INSTANCE_SRC_IMAGES"] = self.src_images
3795

    
3796
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
3797
      primary_node=self.op.pnode,
3798
      secondary_nodes=self.secondaries,
3799
      status=self.instance_status,
3800
      os_type=self.op.os_type,
3801
      memory=self.be_full[constants.BE_MEMORY],
3802
      vcpus=self.be_full[constants.BE_VCPUS],
3803
      nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
3804
    ))
3805

    
3806
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
3807
          self.secondaries)
3808
    return env, nl, nl
3809

    
3810

    
3811
  def CheckPrereq(self):
3812
    """Check prerequisites.
3813

3814
    """
3815
    if (not self.cfg.GetVGName() and
3816
        self.op.disk_template not in constants.DTS_NOT_LVM):
3817
      raise errors.OpPrereqError("Cluster does not support lvm-based"
3818
                                 " instances")
3819

    
3820

    
3821
    if self.op.mode == constants.INSTANCE_IMPORT:
3822
      src_node = self.op.src_node
3823
      src_path = self.op.src_path
3824

    
3825
      if src_node is None:
3826
        exp_list = self.rpc.call_export_list(
3827
          self.acquired_locks[locking.LEVEL_NODE])
3828
        found = False
3829
        for node in exp_list:
3830
          if not exp_list[node].failed and src_path in exp_list[node].data:
3831
            found = True
3832
            self.op.src_node = src_node = node
3833
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
3834
                                                       src_path)
3835
            break
3836
        if not found:
3837
          raise errors.OpPrereqError("No export found for relative path %s" %
3838
                                      src_path)
3839

    
3840
      result = self.rpc.call_export_info(src_node, src_path)
3841
      result.Raise()
3842
      if not result.data:
3843
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
3844

    
3845
      export_info = result.data
3846
      if not export_info.has_section(constants.INISECT_EXP):
3847
        raise errors.ProgrammerError("Corrupted export config")
3848

    
3849
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
3850
      if (int(ei_version) != constants.EXPORT_VERSION):
3851
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
3852
                                   (ei_version, constants.EXPORT_VERSION))
3853

    
3854
      # Check that the new instance doesn't have less disks than the export
3855
      instance_disks = len(self.disks)
3856
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
3857
      if instance_disks < export_disks:
3858
        raise errors.OpPrereqError("Not enough disks to import."
3859
                                   " (instance: %d, export: %d)" %
3860
                                   (instance_disks, export_disks))
3861

    
3862
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
3863
      disk_images = []
3864
      for idx in range(export_disks):
3865
        option = 'disk%d_dump' % idx
3866
        if export_info.has_option(constants.INISECT_INS, option):
3867
          # FIXME: are the old os-es, disk sizes, etc. useful?
3868
          export_name = export_info.get(constants.INISECT_INS, option)
3869
          image = os.path.join(src_path, export_name)
3870
          disk_images.append(image)
3871
        else:
3872
          disk_images.append(False)
3873

    
3874
      self.src_images = disk_images
3875

    
3876
      old_name = export_info.get(constants.INISECT_INS, 'name')
3877
      # FIXME: int() here could throw a ValueError on broken exports
3878
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
3879
      if self.op.instance_name == old_name:
3880
        for idx, nic in enumerate(self.nics):
3881
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
3882
            nic_mac_ini = 'nic%d_mac' % idx
3883
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
3884

    
3885
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
3886
    if self.op.start and not self.op.ip_check:
3887
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
3888
                                 " adding an instance in start mode")
3889

    
3890
    if self.op.ip_check:
3891
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
3892
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3893
                                   (self.check_ip, self.op.instance_name))
3894

    
3895
    #### allocator run
3896

    
3897
    if self.op.iallocator is not None:
3898
      self._RunAllocator()
3899

    
3900
    #### node related checks
3901

    
3902
    # check primary node
3903
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
3904
    assert self.pnode is not None, \
3905
      "Cannot retrieve locked node %s" % self.op.pnode
3906
    self.secondaries = []
3907

    
3908
    # mirror node verification
3909
    if self.op.disk_template in constants.DTS_NET_MIRROR:
3910
      if self.op.snode is None:
3911
        raise errors.OpPrereqError("The networked disk templates need"
3912
                                   " a mirror node")
3913
      if self.op.snode == pnode.name:
3914
        raise errors.OpPrereqError("The secondary node cannot be"
3915
                                   " the primary node.")
3916
      self.secondaries.append(self.op.snode)
3917

    
3918
    nodenames = [pnode.name] + self.secondaries
3919

    
3920
    req_size = _ComputeDiskSize(self.op.disk_template,
3921
                                self.disks)
3922

    
3923
    # Check lv size requirements
3924
    if req_size is not None:
3925
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
3926
                                         self.op.hypervisor)
3927
      for node in nodenames:
3928
        info = nodeinfo[node]
3929
        info.Raise()
3930
        info = info.data
3931
        if not info:
3932
          raise errors.OpPrereqError("Cannot get current information"
3933
                                     " from node '%s'" % node)
3934
        vg_free = info.get('vg_free', None)
3935
        if not isinstance(vg_free, int):
3936
          raise errors.OpPrereqError("Can't compute free disk space on"
3937
                                     " node %s" % node)
3938
        if req_size > info['vg_free']:
3939
          raise errors.OpPrereqError("Not enough disk space on target node %s."
3940
                                     " %d MB available, %d MB required" %
3941
                                     (node, info['vg_free'], req_size))
3942

    
3943
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
3944

    
3945
    # os verification
3946
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
3947
    result.Raise()
3948
    if not isinstance(result.data, objects.OS):
3949
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
3950
                                 " primary node"  % self.op.os_type)
3951

    
3952
    # bridge check on primary node
3953
    bridges = [n.bridge for n in self.nics]
3954
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
3955
    result.Raise()
3956
    if not result.data:
3957
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
3958
                                 " exist on destination node '%s'" %
3959
                                 (",".join(bridges), pnode.name))
3960

    
3961
    # memory check on primary node
3962
    if self.op.start:
3963
      _CheckNodeFreeMemory(self, self.pnode.name,
3964
                           "creating instance %s" % self.op.instance_name,
3965
                           self.be_full[constants.BE_MEMORY],
3966
                           self.op.hypervisor)
3967

    
3968
    if self.op.start:
3969
      self.instance_status = 'up'
3970
    else:
3971
      self.instance_status = 'down'
3972

    
3973
  def Exec(self, feedback_fn):
3974
    """Create and add the instance to the cluster.
3975

3976
    """
3977
    instance = self.op.instance_name
3978
    pnode_name = self.pnode.name
3979

    
3980
    for nic in self.nics:
3981
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
3982
        nic.mac = self.cfg.GenerateMAC()
3983

    
3984
    ht_kind = self.op.hypervisor
3985
    if ht_kind in constants.HTS_REQ_PORT:
3986
      network_port = self.cfg.AllocatePort()
3987
    else:
3988
      network_port = None
3989

    
3990
    ##if self.op.vnc_bind_address is None:
3991
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
3992

    
3993
    # this is needed because os.path.join does not accept None arguments
3994
    if self.op.file_storage_dir is None:
3995
      string_file_storage_dir = ""
3996
    else:
3997
      string_file_storage_dir = self.op.file_storage_dir
3998

    
3999
    # build the full file storage dir path
4000
    file_storage_dir = os.path.normpath(os.path.join(
4001
                                        self.cfg.GetFileStorageDir(),
4002
                                        string_file_storage_dir, instance))
4003

    
4004

    
4005
    disks = _GenerateDiskTemplate(self,
4006
                                  self.op.disk_template,
4007
                                  instance, pnode_name,
4008
                                  self.secondaries,
4009
                                  self.disks,
4010
                                  file_storage_dir,
4011
                                  self.op.file_driver,
4012
                                  0)
4013

    
4014
    iobj = objects.Instance(name=instance, os=self.op.os_type,
4015
                            primary_node=pnode_name,
4016
                            nics=self.nics, disks=disks,
4017
                            disk_template=self.op.disk_template,
4018
                            status=self.instance_status,
4019
                            network_port=network_port,
4020
                            beparams=self.op.beparams,
4021
                            hvparams=self.op.hvparams,
4022
                            hypervisor=self.op.hypervisor,
4023
                            )
4024

    
4025
    feedback_fn("* creating instance disks...")
4026
    if not _CreateDisks(self, iobj):
4027
      _RemoveDisks(self, iobj)
4028
      self.cfg.ReleaseDRBDMinors(instance)
4029
      raise errors.OpExecError("Device creation failed, reverting...")
4030

    
4031
    feedback_fn("adding instance %s to cluster config" % instance)
4032

    
4033
    self.cfg.AddInstance(iobj)
4034
    # Declare that we don't want to remove the instance lock anymore, as we've
4035
    # added the instance to the config
4036
    del self.remove_locks[locking.LEVEL_INSTANCE]
4037
    # Remove the temporary assignments for the instance's DRBDs
4038
    self.cfg.ReleaseDRBDMinors(instance)
4039
    # Unlock all the nodes
4040
    if self.op.mode == constants.INSTANCE_IMPORT:
4041
      nodes_keep = [self.op.src_node]
4042
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
4043
                       if node != self.op.src_node]
4044
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
4045
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
4046
    else:
4047
      self.context.glm.release(locking.LEVEL_NODE)
4048
      del self.acquired_locks[locking.LEVEL_NODE]
4049

    
4050
    if self.op.wait_for_sync:
4051
      disk_abort = not _WaitForSync(self, iobj)
4052
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
4053
      # make sure the disks are not degraded (still sync-ing is ok)
4054
      time.sleep(15)
4055
      feedback_fn("* checking mirrors status")
4056
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
4057
    else:
4058
      disk_abort = False
4059

    
4060
    if disk_abort:
4061
      _RemoveDisks(self, iobj)
4062
      self.cfg.RemoveInstance(iobj.name)
4063
      # Make sure the instance lock gets removed
4064
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
4065
      raise errors.OpExecError("There are some degraded disks for"
4066
                               " this instance")
4067

    
4068
    feedback_fn("creating os for instance %s on node %s" %
4069
                (instance, pnode_name))
4070

    
4071
    if iobj.disk_template != constants.DT_DISKLESS:
4072
      if self.op.mode == constants.INSTANCE_CREATE:
4073
        feedback_fn("* running the instance OS create scripts...")
4074
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
4075
        result.Raise()
4076
        if not result.data:
4077
          raise errors.OpExecError("Could not add os for instance %s"
4078
                                   " on node %s" %
4079
                                   (instance, pnode_name))
4080

    
4081
      elif self.op.mode == constants.INSTANCE_IMPORT:
4082
        feedback_fn("* running the instance OS import scripts...")
4083
        src_node = self.op.src_node
4084
        src_images = self.src_images
4085
        cluster_name = self.cfg.GetClusterName()
4086
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
4087
                                                         src_node, src_images,
4088
                                                         cluster_name)
4089
        import_result.Raise()
4090
        for idx, result in enumerate(import_result.data):
4091
          if not result:
4092
            self.LogWarning("Could not import the image %s for instance"
4093
                            " %s, disk %d, on node %s" %
4094
                            (src_images[idx], instance, idx, pnode_name))
4095
      else:
4096
        # also checked in the prereq part
4097
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
4098
                                     % self.op.mode)
4099

    
4100
    if self.op.start:
4101
      logging.info("Starting instance %s on node %s", instance, pnode_name)
4102
      feedback_fn("* starting instance...")
4103
      result = self.rpc.call_instance_start(pnode_name, iobj, None)
4104
      result.Raise()
4105
      if not result.data:
4106
        raise errors.OpExecError("Could not start instance")


class LUConnectConsole(NoHooksLU):
4110
  """Connect to an instance's console.
4111

4112
  This is somewhat special in that it returns the command line that
4113
  you need to run on the master node in order to connect to the
4114
  console.
4115

4116
  """
4117
  _OP_REQP = ["instance_name"]
4118
  REQ_BGL = False
4119

    
4120
  def ExpandNames(self):
4121
    self._ExpandAndLockInstance()
4122

    
4123
  def CheckPrereq(self):
4124
    """Check prerequisites.
4125

4126
    This checks that the instance is in the cluster.
4127

4128
    """
4129
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4130
    assert self.instance is not None, \
4131
      "Cannot retrieve locked instance %s" % self.op.instance_name
4132

    
4133
  def Exec(self, feedback_fn):
4134
    """Connect to the console of an instance
4135

4136
    """
4137
    instance = self.instance
4138
    node = instance.primary_node
4139

    
4140
    node_insts = self.rpc.call_instance_list([node],
4141
                                             [instance.hypervisor])[node]
4142
    node_insts.Raise()
4143

    
4144
    if instance.name not in node_insts.data:
4145
      raise errors.OpExecError("Instance %s is not running." % instance.name)
4146

    
4147
    logging.debug("Connecting to console of %s on %s", instance.name, node)
4148

    
4149
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
4150
    console_cmd = hyper.GetShellCommandForConsole(instance)
4151

    
4152
    # build ssh cmdline
4153
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
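
    # Descriptive note: only a command line is returned here; the caller is
    # expected to run it on the master node.  The result might look roughly
    # like (hypothetical shape, the exact arguments come from
    # SshRunner.BuildCmd):
    #   ssh -t root@<primary node> '<hypervisor console command>'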


class LUReplaceDisks(LogicalUnit):
4157
  """Replace the disks of an instance.
4158

4159
  """
4160
  HPATH = "mirrors-replace"
4161
  HTYPE = constants.HTYPE_INSTANCE
4162
  _OP_REQP = ["instance_name", "mode", "disks"]
4163
  REQ_BGL = False
4164

    
4165
  def ExpandNames(self):
4166
    self._ExpandAndLockInstance()
4167

    
4168
    if not hasattr(self.op, "remote_node"):
4169
      self.op.remote_node = None
4170

    
4171
    ia_name = getattr(self.op, "iallocator", None)
4172
    if ia_name is not None:
4173
      if self.op.remote_node is not None:
4174
        raise errors.OpPrereqError("Give either the iallocator or the new"
4175
                                   " secondary, not both")
4176
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4177
    elif self.op.remote_node is not None:
4178
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
4179
      if remote_node is None:
4180
        raise errors.OpPrereqError("Node '%s' not known" %
4181
                                   self.op.remote_node)
4182
      self.op.remote_node = remote_node
4183
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
4184
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
4185
    else:
4186
      self.needed_locks[locking.LEVEL_NODE] = []
4187
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4188

    
4189
  def DeclareLocks(self, level):
4190
    # If we're not already locking all nodes in the set we have to declare the
4191
    # instance's primary/secondary nodes.
4192
    if (level == locking.LEVEL_NODE and
4193
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
4194
      self._LockInstancesNodes()
4195

    
4196
  def _RunAllocator(self):
4197
    """Compute a new secondary node using an IAllocator.
4198

4199
    """
4200
    ial = IAllocator(self,
4201
                     mode=constants.IALLOCATOR_MODE_RELOC,
4202
                     name=self.op.instance_name,
4203
                     relocate_from=[self.sec_node])
4204

    
4205
    ial.Run(self.op.iallocator)
4206

    
4207
    if not ial.success:
4208
      raise errors.OpPrereqError("Can't compute nodes using"
4209
                                 " iallocator '%s': %s" % (self.op.iallocator,
4210
                                                           ial.info))
4211
    if len(ial.nodes) != ial.required_nodes:
4212
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
4215
    self.op.remote_node = ial.nodes[0]
4216
    self.LogInfo("Selected new secondary for the instance: %s",
4217
                 self.op.remote_node)
4218

    
4219
  def BuildHooksEnv(self):
4220
    """Build hooks env.
4221

4222
    This runs on the master, the primary and all the secondaries.
4223

4224
    """
4225
    env = {
4226
      "MODE": self.op.mode,
4227
      "NEW_SECONDARY": self.op.remote_node,
4228
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
4229
      }
4230
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4231
    nl = [
4232
      self.cfg.GetMasterNode(),
4233
      self.instance.primary_node,
4234
      ]
4235
    if self.op.remote_node is not None:
4236
      nl.append(self.op.remote_node)
4237
    return env, nl, nl
4238

    
4239
  def CheckPrereq(self):
4240
    """Check prerequisites.
4241

4242
    This checks that the instance is in the cluster.
4243

4244
    """
4245
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4246
    assert instance is not None, \
4247
      "Cannot retrieve locked instance %s" % self.op.instance_name
4248
    self.instance = instance
4249

    
4250
    if instance.disk_template not in constants.DTS_NET_MIRROR:
4251
      raise errors.OpPrereqError("Instance's disk layout is not"
4252
                                 " network mirrored.")
4253

    
4254
    if len(instance.secondary_nodes) != 1:
4255
      raise errors.OpPrereqError("The instance has a strange layout,"
4256
                                 " expected one secondary but found %d" %
4257
                                 len(instance.secondary_nodes))
4258

    
4259
    self.sec_node = instance.secondary_nodes[0]
4260

    
4261
    ia_name = getattr(self.op, "iallocator", None)
4262
    if ia_name is not None:
4263
      self._RunAllocator()
4264

    
4265
    remote_node = self.op.remote_node
4266
    if remote_node is not None:
4267
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
4268
      assert self.remote_node_info is not None, \
4269
        "Cannot retrieve locked node %s" % remote_node
4270
    else:
4271
      self.remote_node_info = None
4272
    if remote_node == instance.primary_node:
4273
      raise errors.OpPrereqError("The specified node is the primary node of"
4274
                                 " the instance.")
4275
    elif remote_node == self.sec_node:
4276
      if self.op.mode == constants.REPLACE_DISK_SEC:
4277
        # this is for DRBD8, where we can't execute the same mode of
4278
        # replacement as for drbd7 (no different port allocated)
4279
        raise errors.OpPrereqError("Same secondary given, cannot execute"
4280
                                   " replacement")
4281
    if instance.disk_template == constants.DT_DRBD8:
4282
      if (self.op.mode == constants.REPLACE_DISK_ALL and
4283
          remote_node is not None):
4284
        # switch to replace secondary mode
4285
        self.op.mode = constants.REPLACE_DISK_SEC
4286

    
4287
      if self.op.mode == constants.REPLACE_DISK_ALL:
4288
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
4289
                                   " secondary disk replacement, not"
4290
                                   " both at once")
4291
      elif self.op.mode == constants.REPLACE_DISK_PRI:
4292
        if remote_node is not None:
4293
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
4294
                                     " the secondary while doing a primary"
4295
                                     " node disk replacement")
4296
        self.tgt_node = instance.primary_node
4297
        self.oth_node = instance.secondary_nodes[0]
4298
      elif self.op.mode == constants.REPLACE_DISK_SEC:
4299
        self.new_node = remote_node # this can be None, in which case
4300
                                    # we don't change the secondary
4301
        self.tgt_node = instance.secondary_nodes[0]
4302
        self.oth_node = instance.primary_node
4303
      else:
4304
        raise errors.ProgrammerError("Unhandled disk replace mode")
4305

    
4306
    if not self.op.disks:
4307
      self.op.disks = range(len(instance.disks))
4308

    
4309
    for disk_idx in self.op.disks:
4310
      instance.FindDisk(disk_idx)
4311

    
4312
  def _ExecD8DiskOnly(self, feedback_fn):
4313
    """Replace a disk on the primary or secondary for dbrd8.
4314

4315
    The algorithm for replace is quite complicated:
4316

4317
      1. for each disk to be replaced:
4318

4319
        1. create new LVs on the target node with unique names
4320
        1. detach old LVs from the drbd device
4321
        1. rename old LVs to name_replaced.<time_t>
4322
        1. rename new LVs to old LVs
4323
        1. attach the new LVs (with the old names now) to the drbd device
4324

4325
      1. wait for sync across all devices
4326

4327
      1. for each modified disk:
4328

4329
        1. remove old LVs (which have the name name_replaced.<time_t>)
4330

4331
    Failures are not very well handled.
4332

4333
    """
4334
    steps_total = 6
4335
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4336
    instance = self.instance
4337
    iv_names = {}
4338
    vgname = self.cfg.GetVGName()
4339
    # start of work
4340
    cfg = self.cfg
4341
    tgt_node = self.tgt_node
4342
    oth_node = self.oth_node
4343

    
4344
    # Step: check device activation
4345
    self.proc.LogStep(1, steps_total, "check device existence")
4346
    info("checking volume groups")
4347
    my_vg = cfg.GetVGName()
4348
    results = self.rpc.call_vg_list([oth_node, tgt_node])
4349
    if not results:
4350
      raise errors.OpExecError("Can't list volume groups on the nodes")
4351
    for node in oth_node, tgt_node:
4352
      res = results[node]
4353
      if res.failed or not res.data or my_vg not in res.data:
4354
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4355
                                 (my_vg, node))
4356
    for idx, dev in enumerate(instance.disks):
4357
      if idx not in self.op.disks:
4358
        continue
4359
      for node in tgt_node, oth_node:
4360
        info("checking disk/%d on %s" % (idx, node))
4361
        cfg.SetDiskID(dev, node)
4362
        if not self.rpc.call_blockdev_find(node, dev):
4363
          raise errors.OpExecError("Can't find disk/%d on node %s" %
4364
                                   (idx, node))
4365

    
4366
    # Step: check other node consistency
4367
    self.proc.LogStep(2, steps_total, "check peer consistency")
4368
    for idx, dev in enumerate(instance.disks):
4369
      if idx not in self.op.disks:
4370
        continue
4371
      info("checking disk/%d consistency on %s" % (idx, oth_node))
4372
      if not _CheckDiskConsistency(self, dev, oth_node,
4373
                                   oth_node==instance.primary_node):
4374
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
4375
                                 " to replace disks on this node (%s)" %
4376
                                 (oth_node, tgt_node))
4377

    
4378
    # Step: create new storage
4379
    self.proc.LogStep(3, steps_total, "allocate new storage")
4380
    for idx, dev in enumerate(instance.disks):
4381
      if idx not in self.op.disks:
4382
        continue
4383
      size = dev.size
4384
      cfg.SetDiskID(dev, tgt_node)
4385
      lv_names = [".disk%d_%s" % (idx, suf)
4386
                  for suf in ["data", "meta"]]
4387
      names = _GenerateUniqueNames(self, lv_names)
4388
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
4389
                             logical_id=(vgname, names[0]))
4390
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
4391
                             logical_id=(vgname, names[1]))
4392
      new_lvs = [lv_data, lv_meta]
4393
      old_lvs = dev.children
4394
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
4395
      info("creating new local storage on %s for %s" %
4396
           (tgt_node, dev.iv_name))
4397
      # since we *always* want to create this LV, we use the
4398
      # _Create...OnPrimary (which forces the creation), even if we
4399
      # are talking about the secondary node
4400
      for new_lv in new_lvs:
4401
        if not _CreateBlockDevOnPrimary(self, tgt_node, instance, new_lv,
4402
                                        _GetInstanceInfoText(instance)):
4403
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4404
                                   " node '%s'" %
4405
                                   (new_lv.logical_id[1], tgt_node))
4406

    
4407
    # Step: for each lv, detach+rename*2+attach
4408
    self.proc.LogStep(4, steps_total, "change drbd configuration")
4409
    for dev, old_lvs, new_lvs in iv_names.itervalues():
4410
      info("detaching %s drbd from local storage" % dev.iv_name)
4411
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
4412
      result.Raise()
4413
      if not result.data:
4414
        raise errors.OpExecError("Can't detach drbd from local storage on node"
4415
                                 " %s for device %s" % (tgt_node, dev.iv_name))
4416
      #dev.children = []
4417
      #cfg.Update(instance)
4418

    
4419
      # ok, we created the new LVs, so now we know we have the needed
4420
      # storage; as such, we proceed on the target node to rename
4421
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
4422
      # using the assumption that logical_id == physical_id (which in
4423
      # turn is the unique_id on that node)
4424

    
4425
      # FIXME(iustin): use a better name for the replaced LVs
4426
      temp_suffix = int(time.time())
4427
      ren_fn = lambda d, suff: (d.physical_id[0],
4428
                                d.physical_id[1] + "_replaced-%s" % suff)
4429
      # build the rename list based on what LVs exist on the node
4430
      rlist = []
4431
      for to_ren in old_lvs:
4432
        find_res = self.rpc.call_blockdev_find(tgt_node, to_ren)
4433
        if not find_res.failed and find_res.data is not None: # device exists
4434
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
4435

    
4436
      info("renaming the old LVs on the target node")
4437
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
4438
      result.Raise()
4439
      if not result.data:
4440
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
4441
      # now we rename the new LVs to the old LVs
4442
      info("renaming the new LVs on the target node")
4443
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
4444
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
4445
      result.Raise()
4446
      if not result.data:
4447
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
4448

    
4449
      for old, new in zip(old_lvs, new_lvs):
4450
        new.logical_id = old.logical_id
4451
        cfg.SetDiskID(new, tgt_node)
4452

    
4453
      for disk in old_lvs:
4454
        disk.logical_id = ren_fn(disk, temp_suffix)
4455
        cfg.SetDiskID(disk, tgt_node)
4456

    
4457
      # now that the new lvs have the old name, we can add them to the device
4458
      info("adding new mirror component on %s" % tgt_node)
4459
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
4460
      if result.failed or not result.data:
4461
        for new_lv in new_lvs:
4462
          result = self.rpc.call_blockdev_remove(tgt_node, new_lv)
4463
          if result.failed or not result.data:
4464
            warning("Can't rollback device %s", hint="manually cleanup unused"
4465
                    " logical volumes")
4466
        raise errors.OpExecError("Can't add local storage to drbd")
4467

    
4468
      dev.children = new_lvs
4469
      cfg.Update(instance)
4470

    
4471
    # Step: wait for sync
4472

    
4473
    # this can fail as the old devices are degraded and _WaitForSync
4474
    # does a combined result over all disks, so we don't check its
4475
    # return value
4476
    self.proc.LogStep(5, steps_total, "sync devices")
4477
    _WaitForSync(self, instance, unlock=True)
4478

    
4479
    # so check manually all the devices
4480
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4481
      cfg.SetDiskID(dev, instance.primary_node)
4482
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
4483
      if result.failed or result.data[5]:
4484
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
4485

    
4486
    # Step: remove old storage
4487
    self.proc.LogStep(6, steps_total, "removing old storage")
4488
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
4489
      info("remove logical volumes for %s" % name)
4490
      for lv in old_lvs:
4491
        cfg.SetDiskID(lv, tgt_node)
4492
        result = self.rpc.call_blockdev_remove(tgt_node, lv)
4493
        if result.failed or not result.data:
4494
          warning("Can't remove old LV", hint="manually remove unused LVs")
4495
          continue
4496

    
4497
  def _ExecD8Secondary(self, feedback_fn):
4498
    """Replace the secondary node for drbd8.
4499

4500
    The algorithm for replace is quite complicated:
4501
      - for all disks of the instance:
4502
        - create new LVs on the new node with same names
4503
        - shutdown the drbd device on the old secondary
4504
        - disconnect the drbd network on the primary
4505
        - create the drbd device on the new secondary
4506
        - network attach the drbd on the primary, using an artifice:
4507
          the drbd code for Attach() will connect to the network if it
4508
          finds a device which is connected to the correct local disks but
4509
          not network enabled
4510
      - wait for sync across all devices
4511
      - remove all disks from the old secondary
4512

4513
    Failures are not very well handled.
4514

4515
    """
4516
    steps_total = 6
4517
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
4518
    instance = self.instance
4519
    iv_names = {}
4520
    vgname = self.cfg.GetVGName()
4521
    # start of work
4522
    cfg = self.cfg
4523
    old_node = self.tgt_node
4524
    new_node = self.new_node
4525
    pri_node = instance.primary_node
4526

    
4527
    # Step: check device activation
4528
    self.proc.LogStep(1, steps_total, "check device existence")
4529
    info("checking volume groups")
4530
    my_vg = cfg.GetVGName()
4531
    results = self.rpc.call_vg_list([pri_node, new_node])
4532
    for node in pri_node, new_node:
4533
      res = results[node]
4534
      if res.failed or not res.data or my_vg not in res.data:
4535
        raise errors.OpExecError("Volume group '%s' not found on %s" %
4536
                                 (my_vg, node))
4537
    for idx, dev in enumerate(instance.disks):
4538
      if idx not in self.op.disks:
4539
        continue
4540
      info("checking disk/%d on %s" % (idx, pri_node))
4541
      cfg.SetDiskID(dev, pri_node)
4542
      result = self.rpc.call_blockdev_find(pri_node, dev)
4543
      result.Raise()
4544
      if not result.data:
4545
        raise errors.OpExecError("Can't find disk/%d on node %s" %
4546
                                 (idx, pri_node))
4547

    
4548
    # Step: check other node consistency
4549
    self.proc.LogStep(2, steps_total, "check peer consistency")
4550
    for idx, dev in enumerate(instance.disks):
4551
      if idx not in self.op.disks:
4552
        continue
4553
      info("checking disk/%d consistency on %s" % (idx, pri_node))
4554
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
4555
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
4556
                                 " unsafe to replace the secondary" %
4557
                                 pri_node)
4558

    
4559
    # Step: create new storage
4560
    self.proc.LogStep(3, steps_total, "allocate new storage")
4561
    for idx, dev in enumerate(instance.disks):
4562
      size = dev.size
4563
      info("adding new local storage on %s for disk/%d" %
4564
           (new_node, idx))
4565
      # since we *always* want to create this LV, we use the
4566
      # _Create...OnPrimary (which forces the creation), even if we
4567
      # are talking about the secondary node
4568
      for new_lv in dev.children:
4569
        if not _CreateBlockDevOnPrimary(self, new_node, instance, new_lv,
4570
                                        _GetInstanceInfoText(instance)):
4571
          raise errors.OpExecError("Failed to create new LV named '%s' on"
4572
                                   " node '%s'" %
4573
                                   (new_lv.logical_id[1], new_node))
4574

    
4575
    # Step 4: drbd minors and drbd setup changes
4576
    # after this, we must manually remove the drbd minors on both the
4577
    # error and the success paths
4578
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
4579
                                   instance.name)
4580
    logging.debug("Allocated minors %s" % (minors,))
4581
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
4582
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
4583
      size = dev.size
4584
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
4585
      # create new devices on new_node
4586
      if pri_node == dev.logical_id[0]:
4587
        new_logical_id = (pri_node, new_node,
4588
                          dev.logical_id[2], dev.logical_id[3], new_minor,
4589
                          dev.logical_id[5])
4590
      else:
4591
        new_logical_id = (new_node, pri_node,
4592
                          dev.logical_id[2], new_minor, dev.logical_id[4],
4593
                          dev.logical_id[5])
4594
      iv_names[idx] = (dev, dev.children, new_logical_id)
4595
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
4596
                    new_logical_id)
4597
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
4598
                              logical_id=new_logical_id,
4599
                              children=dev.children)
4600
      if not _CreateBlockDevOnSecondary(self, new_node, instance,
4601
                                        new_drbd, False,
4602
                                        _GetInstanceInfoText(instance)):
4603
        self.cfg.ReleaseDRBDMinors(instance.name)
4604
        raise errors.OpExecError("Failed to create new DRBD on"
4605
                                 " node '%s'" % new_node)
4606

    
4607
    for idx, dev in enumerate(instance.disks):
4608
      # we have new devices, shutdown the drbd on the old secondary
4609
      info("shutting down drbd for disk/%d on old node" % idx)
4610
      cfg.SetDiskID(dev, old_node)
4611
      result = self.rpc.call_blockdev_shutdown(old_node, dev)
4612
      if result.failed or not result.data:
4613
        warning("Failed to shutdown drbd for disk/%d on old node" % idx,
4614
                hint="Please cleanup this device manually as soon as possible")
4615

    
4616
    info("detaching primary drbds from the network (=> standalone)")
4617
    done = 0
4618
    for idx, dev in enumerate(instance.disks):
4619
      cfg.SetDiskID(dev, pri_node)
4620
      # set the network part of the physical (unique in bdev terms) id
4621
      # to None, meaning detach from network
4622
      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
4623
      # and 'find' the device, which will 'fix' it to match the
4624
      # standalone state
4625
      result = self.rpc.call_blockdev_find(pri_node, dev)
4626
      if not result.failed and result.data:
4627
        done += 1
4628
      else:
4629
        warning("Failed to detach drbd disk/%d from network, unusual case" %
4630
                idx)
4631

    
4632
    if not done:
4633
      # no detaches succeeded (very unlikely)
4634
      self.cfg.ReleaseDRBDMinors(instance.name)
4635
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
4636

    
4637
    # if we managed to detach at least one, we update all the disks of
4638
    # the instance to point to the new secondary
4639
    info("updating instance configuration")
4640
    for dev, _, new_logical_id in iv_names.itervalues():
4641
      dev.logical_id = new_logical_id
4642
      cfg.SetDiskID(dev, pri_node)
4643
    cfg.Update(instance)
4644
    # we can remove now the temp minors as now the new values are
4645
    # written to the config file (and therefore stable)
4646
    self.cfg.ReleaseDRBDMinors(instance.name)
4647

    
4648
    # and now perform the drbd attach
4649
    info("attaching primary drbds to new secondary (standalone => connected)")
4650
    failures = []
4651
    for idx, dev in enumerate(instance.disks):
4652
      info("attaching primary drbd for disk/%d to new secondary node" % idx)
4653
      # since the attach is smart, it's enough to 'find' the device,
4654
      # it will automatically activate the network, if the physical_id
4655
      # is correct
4656
      cfg.SetDiskID(dev, pri_node)
4657
      logging.debug("Disk to attach: %s", dev)
4658
      result = self.rpc.call_blockdev_find(pri_node, dev)
4659
      if result.failed or not result.data:
4660
        warning("can't attach drbd disk/%d to new secondary!" % idx,
4661
                "please do a gnt-instance info to see the status of disks")
4662

    
4663
    # this can fail as the old devices are degraded and _WaitForSync
4664
    # does a combined result over all disks, so we don't check its
4665
    # return value
4666
    self.proc.LogStep(5, steps_total, "sync devices")
4667
    _WaitForSync(self, instance, unlock=True)
4668

    
4669
    # so check manually all the devices
4670
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
4671
      cfg.SetDiskID(dev, pri_node)
4672
      result = self.rpc.call_blockdev_find(pri_node, dev)
4673
      result.Raise()
4674
      if result.data[5]:
4675
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
4676

    
4677
    self.proc.LogStep(6, steps_total, "removing old storage")
4678
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
4679
      info("remove logical volumes for disk/%d" % idx)
4680
      for lv in old_lvs:
4681
        cfg.SetDiskID(lv, old_node)
4682
        result = self.rpc.call_blockdev_remove(old_node, lv)
4683
        if result.failed or not result.data:
4684
          warning("Can't remove LV on old secondary",
4685
                  hint="Cleanup stale volumes by hand")
4686

    
4687
  def Exec(self, feedback_fn):
4688
    """Execute disk replacement.
4689

4690
    This dispatches the disk replacement to the appropriate handler.
4691

4692
    """
4693
    instance = self.instance
4694

    
4695
    # Activate the instance disks if we're replacing them on a down instance
4696
    if instance.status == "down":
4697
      _StartInstanceDisks(self, instance, True)
4698

    
4699
    if instance.disk_template == constants.DT_DRBD8:
4700
      if self.op.remote_node is None:
4701
        fn = self._ExecD8DiskOnly
4702
      else:
4703
        fn = self._ExecD8Secondary
4704
    else:
4705
      raise errors.ProgrammerError("Unhandled disk replacement case")
4706

    
4707
    ret = fn(feedback_fn)
4708

    
4709
    # Deactivate the instance disks if we're replacing them on a down instance
4710
    if instance.status == "down":
4711
      _SafeShutdownInstanceDisks(self, instance)
4712

    
4713
    return ret
4714

    
4715

    
4716
class LUGrowDisk(LogicalUnit):
4717
  """Grow a disk of an instance.
4718

4719
  """
4720
  HPATH = "disk-grow"
4721
  HTYPE = constants.HTYPE_INSTANCE
4722
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
4723
  REQ_BGL = False
4724

    
4725
  def ExpandNames(self):
4726
    self._ExpandAndLockInstance()
4727
    self.needed_locks[locking.LEVEL_NODE] = []
4728
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4729

    
4730
  def DeclareLocks(self, level):
4731
    if level == locking.LEVEL_NODE:
4732
      self._LockInstancesNodes()
4733

    
4734
  def BuildHooksEnv(self):
4735
    """Build hooks env.
4736

4737
    This runs on the master, the primary and all the secondaries.
4738

4739
    """
4740
    env = {
4741
      "DISK": self.op.disk,
4742
      "AMOUNT": self.op.amount,
4743
      }
4744
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4745
    nl = [
4746
      self.cfg.GetMasterNode(),
4747
      self.instance.primary_node,
4748
      ]
4749
    return env, nl, nl
4750

    
4751
  def CheckPrereq(self):
4752
    """Check prerequisites.
4753

4754
    This checks that the instance is in the cluster.
4755

4756
    """
4757
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4758
    assert instance is not None, \
4759
      "Cannot retrieve locked instance %s" % self.op.instance_name
4760

    
4761
    self.instance = instance
4762

    
4763
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
4764
      raise errors.OpPrereqError("Instance's disk layout does not support"
4765
                                 " growing.")
4766

    
4767
    self.disk = instance.FindDisk(self.op.disk)
4768

    
4769
    nodenames = [instance.primary_node] + list(instance.secondary_nodes)
4770
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4771
                                       instance.hypervisor)
4772
    for node in nodenames:
4773
      info = nodeinfo[node]
4774
      if info.failed or not info.data:
4775
        raise errors.OpPrereqError("Cannot get current information"
4776
                                   " from node '%s'" % node)
4777
      vg_free = info.data.get('vg_free', None)
4778
      if not isinstance(vg_free, int):
4779
        raise errors.OpPrereqError("Can't compute free disk space on"
4780
                                   " node %s" % node)
4781
      if self.op.amount > vg_free:
4782
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
4783
                                   " %d MiB available, %d MiB required" %
4784
                                   (node, vg_free, self.op.amount))
4785

    
4786
  def Exec(self, feedback_fn):
4787
    """Execute disk grow.
4788

4789
    """
4790
    instance = self.instance
4791
    disk = self.disk
4792
    for node in (instance.secondary_nodes + (instance.primary_node,)):
4793
      self.cfg.SetDiskID(disk, node)
4794
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
4795
      result.Raise()
4796
      if (not result.data or not isinstance(result.data, (list, tuple)) or
4797
          len(result.data) != 2):
4798
        raise errors.OpExecError("Grow request failed to node %s" % node)
4799
      elif not result.data[0]:
4800
        raise errors.OpExecError("Grow request failed to node %s: %s" %
4801
                                 (node, result.data[1]))
4802
    disk.RecordGrow(self.op.amount)
4803
    self.cfg.Update(instance)
4804
    if self.op.wait_for_sync:
4805
      disk_abort = not _WaitForSync(self, instance)
4806
      if disk_abort:
4807
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
4808
                             " status.\nPlease check the instance.")
4809

    
4810

    
4811
class LUQueryInstanceData(NoHooksLU):
4812
  """Query runtime instance data.
4813

4814
  """
4815
  _OP_REQP = ["instances", "static"]
4816
  REQ_BGL = False
4817

    
4818
  def ExpandNames(self):
4819
    self.needed_locks = {}
4820
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
4821

    
4822
    if not isinstance(self.op.instances, list):
4823
      raise errors.OpPrereqError("Invalid argument type 'instances'")
4824

    
4825
    if self.op.instances:
4826
      self.wanted_names = []
4827
      for name in self.op.instances:
4828
        full_name = self.cfg.ExpandInstanceName(name)
4829
        if full_name is None:
4830
          raise errors.OpPrereqError("Instance '%s' not known" %
4831
                                     name)
4832
        self.wanted_names.append(full_name)
4833
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
4834
    else:
4835
      self.wanted_names = None
4836
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
4837

    
4838
    self.needed_locks[locking.LEVEL_NODE] = []
4839
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4840

    
4841
  def DeclareLocks(self, level):
4842
    if level == locking.LEVEL_NODE:
4843
      self._LockInstancesNodes()
4844

    
4845
  def CheckPrereq(self):
4846
    """Check prerequisites.
4847

4848
    This only checks the optional instance list against the existing names.
4849

4850
    """
4851
    if self.wanted_names is None:
4852
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4853

    
4854
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
4855
                             in self.wanted_names]
4856
    return
4857

    
4858
  def _ComputeDiskStatus(self, instance, snode, dev):
4859
    """Compute block device status.
4860

4861
    """
4862
    static = self.op.static
4863
    if not static:
4864
      self.cfg.SetDiskID(dev, instance.primary_node)
4865
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
4866
      dev_pstatus.Raise()
4867
      dev_pstatus = dev_pstatus.data
4868
    else:
4869
      dev_pstatus = None
4870

    
4871
    if dev.dev_type in constants.LDS_DRBD:
4872
      # we change the snode then (otherwise we use the one passed in)
4873
      if dev.logical_id[0] == instance.primary_node:
4874
        snode = dev.logical_id[1]
4875
      else:
4876
        snode = dev.logical_id[0]
4877

    
4878
    if snode and not static:
4879
      self.cfg.SetDiskID(dev, snode)
4880
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
4881
      dev_sstatus.Raise()
4882
      dev_sstatus = dev_sstatus.data
4883
    else:
4884
      dev_sstatus = None
4885

    
4886
    if dev.children:
4887
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
4888
                      for child in dev.children]
4889
    else:
4890
      dev_children = []
4891

    
4892
    data = {
4893
      "iv_name": dev.iv_name,
4894
      "dev_type": dev.dev_type,
4895
      "logical_id": dev.logical_id,
4896
      "physical_id": dev.physical_id,
4897
      "pstatus": dev_pstatus,
4898
      "sstatus": dev_sstatus,
4899
      "children": dev_children,
4900
      "mode": dev.mode,
4901
      }
4902

    
4903
    return data
4904

    
4905
  def Exec(self, feedback_fn):
4906
    """Gather and return data"""
4907
    result = {}
4908

    
4909
    cluster = self.cfg.GetClusterInfo()
4910

    
4911
    for instance in self.wanted_instances:
4912
      if not self.op.static:
4913
        remote_info = self.rpc.call_instance_info(instance.primary_node,
4914
                                                  instance.name,
4915
                                                  instance.hypervisor)
4916
        remote_info.Raise()
4917
        remote_info = remote_info.data
4918
        if remote_info and "state" in remote_info:
4919
          remote_state = "up"
4920
        else:
4921
          remote_state = "down"
4922
      else:
4923
        remote_state = None
4924
      if instance.status == "down":
4925
        config_state = "down"
4926
      else:
4927
        config_state = "up"
4928

    
4929
      disks = [self._ComputeDiskStatus(instance, None, device)
4930
               for device in instance.disks]
4931

    
4932
      idict = {
4933
        "name": instance.name,
4934
        "config_state": config_state,
4935
        "run_state": remote_state,
4936
        "pnode": instance.primary_node,
4937
        "snodes": instance.secondary_nodes,
4938
        "os": instance.os,
4939
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
4940
        "disks": disks,
4941
        "hypervisor": instance.hypervisor,
4942
        "network_port": instance.network_port,
4943
        "hv_instance": instance.hvparams,
4944
        "hv_actual": cluster.FillHV(instance),
4945
        "be_instance": instance.beparams,
4946
        "be_actual": cluster.FillBE(instance),
4947
        }
4948

    
4949
      result[instance.name] = idict
4950

    
4951
    return result
4952

    
4953

    
4954
class LUSetInstanceParams(LogicalUnit):
4955
  """Modifies an instances's parameters.
4956

4957
  """
4958
  HPATH = "instance-modify"
4959
  HTYPE = constants.HTYPE_INSTANCE
4960
  _OP_REQP = ["instance_name"]
4961
  REQ_BGL = False
4962

    
4963
  def CheckArguments(self):
4964
    if not hasattr(self.op, 'nics'):
4965
      self.op.nics = []
4966
    if not hasattr(self.op, 'disks'):
4967
      self.op.disks = []
4968
    if not hasattr(self.op, 'beparams'):
4969
      self.op.beparams = {}
4970
    if not hasattr(self.op, 'hvparams'):
4971
      self.op.hvparams = {}
4972
    self.op.force = getattr(self.op, "force", False)
4973
    if not (self.op.nics or self.op.disks or
4974
            self.op.hvparams or self.op.beparams):
4975
      raise errors.OpPrereqError("No changes submitted")
4976

    
4977
    utils.CheckBEParams(self.op.beparams)
4978

    
4979
    # Disk validation
4980
    disk_addremove = 0
4981
    for disk_op, disk_dict in self.op.disks:
4982
      if disk_op == constants.DDM_REMOVE:
4983
        disk_addremove += 1
4984
        continue
4985
      elif disk_op == constants.DDM_ADD:
4986
        disk_addremove += 1
4987
      else:
4988
        if not isinstance(disk_op, int):
4989
          raise errors.OpPrereqError("Invalid disk index")
4990
      if disk_op == constants.DDM_ADD:
4991
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
4992
        if mode not in (constants.DISK_RDONLY, constants.DISK_RDWR):
4993
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
4994
        size = disk_dict.get('size', None)
4995
        if size is None:
4996
          raise errors.OpPrereqError("Required disk parameter size missing")
4997
        try:
4998
          size = int(size)
4999
        except ValueError, err:
5000
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
5001
                                     str(err))
5002
        disk_dict['size'] = size
5003
      else:
5004
        # modification of disk
5005
        if 'size' in disk_dict:
5006
          raise errors.OpPrereqError("Disk size change not possible, use"
5007
                                     " grow-disk")
5008

    
5009
    if disk_addremove > 1:
5010
      raise errors.OpPrereqError("Only one disk add or remove operation"
5011
                                 " supported at a time")
5012

    
5013
    # NIC validation
5014
    nic_addremove = 0
5015
    for nic_op, nic_dict in self.op.nics:
5016
      if nic_op == constants.DDM_REMOVE:
5017
        nic_addremove += 1
5018
        continue
5019
      elif nic_op == constants.DDM_ADD:
5020
        nic_addremove += 1
5021
      else:
5022
        if not isinstance(nic_op, int):
5023
          raise errors.OpPrereqError("Invalid nic index")
5024

    
5025
      # nic_dict should be a dict
5026
      nic_ip = nic_dict.get('ip', None)
5027
      if nic_ip is not None:
5028
        if nic_ip.lower() == "none":
5029
          nic_dict['ip'] = None
5030
        else:
5031
          if not utils.IsValidIP(nic_ip):
5032
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
5033
      # we can only check None bridges and assign the default one
5034
      nic_bridge = nic_dict.get('bridge', None)
5035
      if nic_bridge is None:
5036
        nic_dict['bridge'] = self.cfg.GetDefBridge()
5037
      # but we can validate MACs
5038
      nic_mac = nic_dict.get('mac', None)
5039
      if nic_mac is not None:
5040
        if self.cfg.IsMacInUse(nic_mac):
5041
          raise errors.OpPrereqError("MAC address %s already in use"
5042
                                     " in cluster" % nic_mac)
5043
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5044
          if not utils.IsValidMac(nic_mac):
5045
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
5046
    if nic_addremove > 1:
5047
      raise errors.OpPrereqError("Only one NIC add or remove operation"
5048
                                 " supported at a time")
5049

    
5050
  def ExpandNames(self):
5051
    self._ExpandAndLockInstance()
5052
    self.needed_locks[locking.LEVEL_NODE] = []
5053
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5054

    
5055
  def DeclareLocks(self, level):
5056
    if level == locking.LEVEL_NODE:
5057
      self._LockInstancesNodes()
5058

    
5059
  def BuildHooksEnv(self):
5060
    """Build hooks env.
5061

5062
    This runs on the master, primary and secondaries.
5063

5064
    """
5065
    args = dict()
5066
    if constants.BE_MEMORY in self.be_new:
5067
      args['memory'] = self.be_new[constants.BE_MEMORY]
5068
    if constants.BE_VCPUS in self.be_new:
5069
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
5070
    # FIXME: readd disk/nic changes
5071
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
5072
    nl = [self.cfg.GetMasterNode(),
5073
          self.instance.primary_node] + list(self.instance.secondary_nodes)
5074
    return env, nl, nl
5075

    
5076
  def CheckPrereq(self):
5077
    """Check prerequisites.
5078

5079
    This only checks the instance list against the existing names.
5080

5081
    """
5082
    force = self.force = self.op.force
5083

    
5084
    # checking the new params on the primary/secondary nodes
5085

    
5086
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5087
    assert self.instance is not None, \
5088
      "Cannot retrieve locked instance %s" % self.op.instance_name
5089
    pnode = self.instance.primary_node
5090
    nodelist = [pnode]
5091
    nodelist.extend(instance.secondary_nodes)
5092

    
5093
    # hvparams processing
5094
    if self.op.hvparams:
5095
      i_hvdict = copy.deepcopy(instance.hvparams)
5096
      for key, val in self.op.hvparams.iteritems():
5097
        if val == constants.VALUE_DEFAULT:
5098
          try:
5099
            del i_hvdict[key]
5100
          except KeyError:
5101
            pass
5102
        elif val == constants.VALUE_NONE:
5103
          i_hvdict[key] = None
5104
        else:
5105
          i_hvdict[key] = val
5106
      cluster = self.cfg.GetClusterInfo()
5107
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
5108
                                i_hvdict)
5109
      # local check
5110
      hypervisor.GetHypervisor(
5111
        instance.hypervisor).CheckParameterSyntax(hv_new)
5112
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
5113
      self.hv_new = hv_new # the new actual values
5114
      self.hv_inst = i_hvdict # the new dict (without defaults)
5115
    else:
5116
      self.hv_new = self.hv_inst = {}
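    # Illustrative example of the hvparams merge above (keys/values assumed):
    # with instance hvparams {'kernel_path': '/boot/vmlinuz-custom'} and
    # op.hvparams {'kernel_path': 'default', 'acpi': 'true'}, hv_inst becomes
    # {'acpi': 'true'} (the 'default' marker drops the override) while hv_new
    # is that dict filled with the cluster-level defaults, and it is hv_new
    # that gets validated on the nodes.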
5117

    
5118
    # beparams processing
5119
    if self.op.beparams:
5120
      i_bedict = copy.deepcopy(instance.beparams)
5121
      for key, val in self.op.beparams.iteritems():
5122
        if val == constants.VALUE_DEFAULT:
5123
          try:
5124
            del i_bedict[key]
5125
          except KeyError:
5126
            pass
5127
        else:
5128
          i_bedict[key] = val
5129
      cluster = self.cfg.GetClusterInfo()
5130
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
5131
                                i_bedict)
5132
      self.be_new = be_new # the new actual values
5133
      self.be_inst = i_bedict # the new dict (without defaults)
5134
    else:
5135
      self.be_new = self.be_inst = {}
5136

    
5137
    self.warn = []
5138

    
5139
    if constants.BE_MEMORY in self.op.beparams and not self.force:
5140
      mem_check_list = [pnode]
5141
      if be_new[constants.BE_AUTO_BALANCE]:
5142
        # either we changed auto_balance to yes or it was from before
5143
        mem_check_list.extend(instance.secondary_nodes)
5144
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
5145
                                                  instance.hypervisor)
5146
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
5147
                                         instance.hypervisor)
5148
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
5149
        # Assume the primary node is unreachable and go ahead
5150
        self.warn.append("Can't get info from primary node %s" % pnode)
5151
      else:
5152
        if not instance_info.failed and instance_info.data:
5153
          current_mem = instance_info.data['memory']
5154
        else:
5155
          # Assume instance not running
5156
          # (there is a slight race condition here, but it's not very probable,
5157
          # and we have no other way to check)
5158
          current_mem = 0
5159
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
5160
                    nodeinfo[pnode].data['memory_free'])
5161
        if miss_mem > 0:
5162
          raise errors.OpPrereqError("This change will prevent the instance"
5163
                                     " from starting, due to %d MB of memory"
5164
                                     " missing on its primary node" % miss_mem)
5165

    
5166
      if be_new[constants.BE_AUTO_BALANCE]:
5167
        for node in instance.secondary_nodes:
          nres = nodeinfo[node]
5168
          if nres.failed or not isinstance(nres.data, dict):
5169
            self.warn.append("Can't get info from secondary node %s" % node)
5170
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
5171
            self.warn.append("Not enough memory to failover instance to"
5172
                             " secondary node %s" % node)
5173

    
5174
    # NIC processing
5175
    for nic_op, nic_dict in self.op.nics:
5176
      if nic_op == constants.DDM_REMOVE:
5177
        if not instance.nics:
5178
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
5179
        continue
5180
      if nic_op != constants.DDM_ADD:
5181
        # an existing nic
5182
        if nic_op < 0 or nic_op >= len(instance.nics):
5183
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
5184
                                     " are 0 to %d" %
5185
                                     (nic_op, len(instance.nics)))
5186
      nic_bridge = nic_dict.get('bridge', None)
5187
      if nic_bridge is not None:
5188
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
5189
          msg = ("Bridge '%s' doesn't exist on one of"
5190
                 " the instance nodes" % nic_bridge)
5191
          if self.force:
5192
            self.warn.append(msg)
5193
          else:
5194
            raise errors.OpPrereqError(msg)
5195

    
5196
    # DISK processing
5197
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
5198
      raise errors.OpPrereqError("Disk operations not supported for"
5199
                                 " diskless instances")
5200
    for disk_op, disk_dict in self.op.disks:
5201
      if disk_op == constants.DDM_REMOVE:
5202
        if len(instance.disks) == 1:
5203
          raise errors.OpPrereqError("Cannot remove the last disk of"
5204
                                     " an instance")
5205
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
5206
        ins_l = ins_l[pnode]
5207
        if not isinstance(ins_l, list):
5208
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
5209
        if instance.name in ins_l:
5210
          raise errors.OpPrereqError("Instance is running, can't remove"
5211
                                     " disks.")
5212

    
5213
      if (disk_op == constants.DDM_ADD and
5214
          len(instance.disks) >= constants.MAX_DISKS):
5215
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
5216
                                   " add more" % constants.MAX_DISKS)
5217
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
5218
        # an existing disk
5219
        if disk_op < 0 or disk_op >= len(instance.disks):
5220
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
5221
                                     " are 0 to %d" %
5222
                                     (disk_op, len(instance.disks)))
5223

    
5224
    return
5225

    
5226
  def Exec(self, feedback_fn):
5227
    """Modifies an instance.
5228

5229
    All parameters take effect only at the next restart of the instance.
5230

5231
    """
5232
    # Process here the warnings from CheckPrereq, as we don't have a
5233
    # feedback_fn there.
5234
    for warn in self.warn:
5235
      feedback_fn("WARNING: %s" % warn)
5236

    
5237
    result = []
5238
    instance = self.instance
5239
    # disk changes
5240
    for disk_op, disk_dict in self.op.disks:
5241
      if disk_op == constants.DDM_REMOVE:
5242
        # remove the last disk
5243
        device = instance.disks.pop()
5244
        device_idx = len(instance.disks)
5245
        for node, disk in device.ComputeNodeTree(instance.primary_node):
5246
          self.cfg.SetDiskID(disk, node)
5247
          rpc_result = self.rpc.call_blockdev_remove(node, disk)
5248
          if rpc_result.failed or not rpc_result.data:
5249
            self.proc.LogWarning("Could not remove disk/%d on node %s,"
5250
                                 " continuing anyway", device_idx, node)
5251
        result.append(("disk/%d" % device_idx, "remove"))
5252
      elif disk_op == constants.DDM_ADD:
5253
        # add a new disk
5254
        if instance.disk_template == constants.DT_FILE:
5255
          file_driver, file_path = instance.disks[0].logical_id
5256
          file_path = os.path.dirname(file_path)
5257
        else:
5258
          file_driver = file_path = None
5259
        disk_idx_base = len(instance.disks)
5260
        new_disk = _GenerateDiskTemplate(self,
5261
                                         instance.disk_template,
5262
                                         instance, instance.primary_node,
5263
                                         instance.secondary_nodes,
5264
                                         [disk_dict],
5265
                                         file_path,
5266
                                         file_driver,
5267
                                         disk_idx_base)[0]
5268
        new_disk.mode = disk_dict['mode']
5269
        instance.disks.append(new_disk)
5270
        info = _GetInstanceInfoText(instance)
5271

    
5272
        logging.info("Creating volume %s for instance %s",
5273
                     new_disk.iv_name, instance.name)
5274
        # Note: this needs to be kept in sync with _CreateDisks
5275
        #HARDCODE
5276
        for secondary_node in instance.secondary_nodes:
5277
          if not _CreateBlockDevOnSecondary(self, secondary_node, instance,
5278
                                            new_disk, False, info):
5279
            self.LogWarning("Failed to create volume %s (%s) on"
5280
                            " secondary node %s!",
5281
                            new_disk.iv_name, new_disk, secondary_node)
5282
        #HARDCODE
5283
        if not _CreateBlockDevOnPrimary(self, instance.primary_node,
5284
                                        instance, new_disk, info):
5285
          self.LogWarning("Failed to create volume %s on primary!",
5286
                          new_disk.iv_name)
5287
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
5288
                       (new_disk.size, new_disk.mode)))
5289
      else:
5290
        # change a given disk
5291
        instance.disks[disk_op].mode = disk_dict['mode']
5292
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
5293
    # NIC changes
5294
    for nic_op, nic_dict in self.op.nics:
5295
      if nic_op == constants.DDM_REMOVE:
5296
        # remove the last nic
5297
        del instance.nics[-1]
5298
        result.append(("nic.%d" % len(instance.nics), "remove"))
5299
      elif nic_op == constants.DDM_ADD:
5300
        # add a new nic
5301
        if 'mac' not in nic_dict:
5302
          mac = constants.VALUE_GENERATE
5303
        else:
5304
          mac = nic_dict['mac']
5305
        if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5306
          mac = self.cfg.GenerateMAC()
5307
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
5308
                              bridge=nic_dict.get('bridge', None))
5309
        instance.nics.append(new_nic)
5310
        result.append(("nic.%d" % (len(instance.nics) - 1),
5311
                       "add:mac=%s,ip=%s,bridge=%s" %
5312
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
5313
      else:
5314
        # change a given nic
5315
        for key in 'mac', 'ip', 'bridge':
5316
          if key in nic_dict:
5317
            setattr(instance.nics[nic_op], key, nic_dict[key])
5318
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
5319

    
5320
    # hvparams changes
5321
    if self.op.hvparams:
5322
      instance.hvparams = self.hv_new
5323
      for key, val in self.op.hvparams.iteritems():
5324
        result.append(("hv/%s" % key, val))
5325

    
5326
    # beparams changes
5327
    if self.op.beparams:
5328
      instance.beparams = self.be_inst
5329
      for key, val in self.op.beparams.iteritems():
5330
        result.append(("be/%s" % key, val))
5331

    
5332
    self.cfg.Update(instance)
5333

    
5334
    return result
5335

    
5336

    
5337
class LUQueryExports(NoHooksLU):
5338
  """Query the exports list
5339

5340
  """
5341
  _OP_REQP = ['nodes']
5342
  REQ_BGL = False
5343

    
5344
  def ExpandNames(self):
5345
    self.needed_locks = {}
5346
    self.share_locks[locking.LEVEL_NODE] = 1
5347
    if not self.op.nodes:
5348
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5349
    else:
5350
      self.needed_locks[locking.LEVEL_NODE] = \
5351
        _GetWantedNodes(self, self.op.nodes)
5352

    
5353
  def CheckPrereq(self):
5354
    """Check prerequisites.
5355

5356
    """
5357
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
5358

    
5359
  def Exec(self, feedback_fn):
5360
    """Compute the list of all the exported system images.
5361

5362
    @rtype: dict
5363
    @return: a dictionary with the structure node->(export-list)
5364
        where export-list is a list of the instances exported on
5365
        that node.
5366

5367
    """
5368
    rpcresult = self.rpc.call_export_list(self.nodes)
5369
    result = {}
5370
    for node in rpcresult:
5371
      if rpcresult[node].failed:
5372
        result[node] = False
5373
      else:
5374
        result[node] = rpcresult[node].data
5375

    
5376
    return result
5377

    
5378

    
5379
class LUExportInstance(LogicalUnit):
5380
  """Export an instance to an image in the cluster.
5381

5382
  """
5383
  HPATH = "instance-export"
5384
  HTYPE = constants.HTYPE_INSTANCE
5385
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
5386
  REQ_BGL = False
5387

    
5388
  def ExpandNames(self):
5389
    self._ExpandAndLockInstance()
5390
    # FIXME: lock only instance primary and destination node
5391
    #
5392
    # Sad but true, for now we have to lock all nodes, as we don't know where
5393
    # the previous export might be, and in this LU we search for it and
5394
    # remove it from its current node. In the future we could fix this by:
5395
    #  - making a tasklet to search (share-lock all), then create the new one,
5396
    #    then one to remove it afterwards
5397
    #  - removing the removal operation altogether
5398
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5399

    
5400
  def DeclareLocks(self, level):
5401
    """Last minute lock declaration."""
5402
    # All nodes are locked anyway, so nothing to do here.
5403

    
5404
  def BuildHooksEnv(self):
5405
    """Build hooks env.
5406

5407
    This will run on the master, primary node and target node.
5408

5409
    """
5410
    env = {
5411
      "EXPORT_NODE": self.op.target_node,
5412
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
5413
      }
5414
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5415
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
5416
          self.op.target_node]
5417
    return env, nl, nl
5418

    
5419
  def CheckPrereq(self):
5420
    """Check prerequisites.
5421

5422
    This checks that the instance and node names are valid.
5423

5424
    """
5425
    instance_name = self.op.instance_name
5426
    self.instance = self.cfg.GetInstanceInfo(instance_name)
5427
    assert self.instance is not None, \
5428
          "Cannot retrieve locked instance %s" % self.op.instance_name
5429

    
5430
    self.dst_node = self.cfg.GetNodeInfo(
5431
      self.cfg.ExpandNodeName(self.op.target_node))
5432

    
5433
    if self.dst_node is None:
5434
      # This is wrong node name, not a non-locked node
5435
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
5436

    
5437
    # instance disk type verification
5438
    for disk in self.instance.disks:
5439
      if disk.dev_type == constants.LD_FILE:
5440
        raise errors.OpPrereqError("Export not supported for instances with"
5441
                                   " file-based disks")
5442

    
5443
  def Exec(self, feedback_fn):
5444
    """Export an instance to an image in the cluster.
5445

5446
    """
5447
    instance = self.instance
5448
    dst_node = self.dst_node
5449
    src_node = instance.primary_node
5450
    if self.op.shutdown:
5451
      # shutdown the instance, but not the disks
5452
      result = self.rpc.call_instance_shutdown(src_node, instance)
5453
      result.Raise()
5454
      if not result.data:
5455
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
5456
                                 (instance.name, src_node))
5457

    
5458
    vgname = self.cfg.GetVGName()
5459

    
5460
    snap_disks = []
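    # snap_disks collects one snapshot Disk object per instance disk, with
    # False standing in for disks whose snapshot failed; the export and
    # finalize calls below walk it in the same order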
5461

    
5462
    try:
5463
      for disk in instance.disks:
5464
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
5465
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
5466
        if new_dev_name.failed or not new_dev_name.data:
5467
          self.LogWarning("Could not snapshot block device %s on node %s",
5468
                          disk.logical_id[1], src_node)
5469
          snap_disks.append(False)
5470
        else:
5471
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
5472
                                 logical_id=(vgname, new_dev_name.data),
5473
                                 physical_id=(vgname, new_dev_name.data),
5474
                                 iv_name=disk.iv_name)
5475
          snap_disks.append(new_dev)
5476

    
5477
    finally:
5478
      if self.op.shutdown and instance.status == "up":
5479
        result = self.rpc.call_instance_start(src_node, instance, None)
5480
        if result.failed or not result.data:
5481
          _ShutdownInstanceDisks(self, instance)
5482
          raise errors.OpExecError("Could not start instance")
5483

    
5484
    # TODO: check for size
5485

    
5486
    cluster_name = self.cfg.GetClusterName()
5487
    for idx, dev in enumerate(snap_disks):
5488
      if dev:
5489
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
5490
                                               instance, cluster_name, idx)
5491
        if result.failed or not result.data:
5492
          self.LogWarning("Could not export block device %s from node %s to"
5493
                          " node %s", dev.logical_id[1], src_node,
5494
                          dst_node.name)
5495
        result = self.rpc.call_blockdev_remove(src_node, dev)
5496
        if result.failed or not result.data:
5497
          self.LogWarning("Could not remove snapshot block device %s from node"
5498
                          " %s", dev.logical_id[1], src_node)
5499

    
5500
    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          if not self.rpc.call_export_remove(node, instance.name):
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)


class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = False
    if not instance_name:
      fqdn_warn = True
      instance_name = self.op.instance_name

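    # Ask every locked node for its export list and remove any export
    # matching the (possibly expanded) instance name.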
    exportlist = self.rpc.call_export_list(self.acquired_locks[
      locking.LEVEL_NODE])
    found = False
    for node in exportlist:
      if exportlist[node].failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if instance_name in exportlist[node].data:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        if result.failed or not result.data:
          logging.error("Could not remove export for instance %s"
                        " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")


class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
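    # Cluster tags need no extra locks here; node and instance tags expand
    # the target name and lock only that node or instance.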
    if self.op.kind == constants.TAG_NODE:
      name = self.cfg.ExpandNodeName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_NODE] = name
    elif self.op.kind == constants.TAG_INSTANCE:
      name = self.cfg.ExpandInstanceName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_INSTANCE] = name

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))


class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUSearchTags(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  _OP_REQP = ["pattern"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err))

  def Exec(self, feedback_fn):
    """Returns the list of (path, tag) pairs matching the pattern.

    """
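    # Build (path, object) pairs for the cluster, all instances and all
    # nodes, then match the compiled pattern against each of their tags.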
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results


class LUAddTags(TagsLU):
  """Sets a tag on a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
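    # Saving the updated object can race with other configuration writers;
    # in that case the client is asked to retry the whole operation.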
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        node_result.Raise()
        if not node_result.data:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result.data))


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - the LU (and through it the cfg) needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in_text, in_data, out_text, out_data), that
      represent the input (to the external script) in text and data
      structure format, and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
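  # Typical usage (mirroring LUTestAllocator.Exec below): build an instance
  # in one of the two modes, then call Run() with the allocator script name;
  # the parsed results become available as .success, .info and .nodes.
  # The names used here are only illustrative:
  #
  #   ial = IAllocator(self, mode=constants.IALLOCATOR_MODE_RELOC,
  #                    name=instance_name, relocate_from=[old_secondary])
  #   ial.Run(allocator_name)
  #   if not ial.success:
  #     ...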
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, lu, mode, name, **kwargs):
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": 1,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname in node_list:
      ninfo = cfg.GetNodeInfo(nname)
      node_data[nname].Raise()
      if not isinstance(node_data[nname].data, dict):
        raise errors.OpExecError("Can't get data for node %s" % nname)
      remote_info = node_data[nname].data
      for attr in ['memory_total', 'memory_free', 'memory_dom0',
                   'vg_size', 'vg_free', 'cpu_total']:
        if attr not in remote_info:
          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
                                   (nname, attr))
        try:
          remote_info[attr] = int(remote_info[attr])
        except ValueError, err:
          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
                                   " %s" % (nname, attr, str(err)))
      # compute memory used by primary instances
      i_p_mem = i_p_up_mem = 0
      for iinfo, beinfo in i_list:
        if iinfo.primary_node == nname:
          i_p_mem += beinfo[constants.BE_MEMORY]
          if iinfo.name not in node_iinfo[nname]:
            i_used_mem = 0
          else:
            i_used_mem = int(node_iinfo[nname][iinfo.name]['memory'])
          i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
          remote_info['memory_free'] -= max(0, i_mem_diff)

          if iinfo.status == "up":
            i_p_up_mem += beinfo[constants.BE_MEMORY]

      # build this node's entry for the allocator input
      pnr = {
        "tags": list(ninfo.GetTags()),
        "total_memory": remote_info['memory_total'],
        "reserved_memory": remote_info['memory_dom0'],
        "free_memory": remote_info['memory_free'],
        "i_pri_memory": i_p_mem,
        "i_pri_up_memory": i_p_up_mem,
        "total_disk": remote_info['vg_size'],
        "free_disk": remote_info['vg_free'],
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "total_cpus": remote_info['cpu_total'],
        "offline": ninfo.offline,
        }
      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "should_run": iinfo.status == "up",
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data
    if len(self.disks) != 2:
      raise errors.OpExecError("Only two-disk configurations supported")

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

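    # Network-mirrored disk templates need a secondary node as well, so two
    # nodes must be requested from the allocator; everything else fits on one.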
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner
    data = self.in_text

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

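    # The runner returns a 4-tuple: a status code (checked against the
    # constants.IARUN_* values), the script's stdout and stderr, and a
    # failure message.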
    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other result attributes.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

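    # Copy the mandatory result keys onto the instance for easy access.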
    for key in "success", "info", "nodes":
6066
      if key not in rdict:
6067
        raise errors.OpExecError("Can't parse iallocator results:"
6068
                                 " missing key '%s'" % key)
6069
      setattr(self, key, rdict[key])
6070

    
6071
    if not isinstance(rdict["nodes"], list):
6072
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
6073
                               " is not a list")
6074
    self.out_data = rdict
6075

    
6076

    
6077
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the requested direction
    and mode.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      if len(self.op.disks) != 2:
        raise errors.OpPrereqError("Only two-disk configurations supported")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

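    # IALLOCATOR_DIR_IN only returns the generated input text, while
    # IALLOCATOR_DIR_OUT actually runs the named allocator script.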
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result