#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import time
import tempfile
import re
import platform
import logging
import copy
import random

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
49
  """Logical Unit base class.
50

51
  Subclasses must follow these rules:
52
    - implement ExpandNames
53
    - implement CheckPrereq
54
    - implement Exec
55
    - implement BuildHooksEnv
56
    - redefine HPATH and HTYPE
57
    - optionally redefine their run requirements:
58
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
59

60
  Note that all commands require root permissions.
61

62
  """
63
  HPATH = None
64
  HTYPE = None
65
  _OP_REQP = []
66
  REQ_BGL = True
67

    
68
  def __init__(self, processor, op, context, rpc):
69
    """Constructor for LogicalUnit.
70

71
    This needs to be overridden in derived classes in order to check op
72
    validity.
73

74
    """
75
    self.proc = processor
76
    self.op = op
77
    self.cfg = context.cfg
78
    self.context = context
79
    self.rpc = rpc
80
    # Dicts used to declare locking needs to mcpu
81
    self.needed_locks = None
82
    self.acquired_locks = {}
83
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
84
    self.add_locks = {}
85
    self.remove_locks = {}
86
    # Used to force good behavior when calling helper functions
87
    self.recalculate_locks = {}
88
    self.__ssh = None
89
    # logging
90
    self.LogWarning = processor.LogWarning
91
    self.LogInfo = processor.LogInfo
92

    
93
    for attr_name in self._OP_REQP:
94
      attr_val = getattr(op, attr_name, None)
95
      if attr_val is None:
96
        raise errors.OpPrereqError("Required parameter '%s' missing" %
97
                                   attr_name)
98
    self.CheckArguments()
99

    
100
  def __GetSSH(self):
101
    """Returns the SshRunner object
102

103
    """
104
    if not self.__ssh:
105
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
106
    return self.__ssh
107

    
108
  ssh = property(fget=__GetSSH)
109

    
110
  def CheckArguments(self):
111
    """Check syntactic validity for the opcode arguments.
112

113
    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:
117

118
      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)
121

122
    The function is allowed to change the self.op attribute so that
123
    later methods can no longer worry about missing parameters.
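
    For illustration only (a sketch; concrete LUs below, e.g.
    LUSetClusterParams.CheckArguments, follow this pattern), an override
    might look like::

      def CheckArguments(self):
        if not hasattr(self.op, "candidate_pool_size"):
          self.op.candidate_pool_size = None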
124

125
    """
126
    pass
127

    
128
  def ExpandNames(self):
129
    """Expand names for this LU.
130

131
    This method is called before starting to execute the opcode, and it should
132
    update all the parameters of the opcode to their canonical form (e.g. a
133
    short node name must be fully expanded after this method has successfully
134
    completed). This way locking, hooks, logging, etc. can work correctly.
135

136
    LUs which implement this method must also populate the self.needed_locks
137
    member, as a dict with lock levels as keys, and a list of needed lock names
138
    as values. Rules:
139

140
      - use an empty dict if you don't need any lock
141
      - if you don't need any lock at a particular level omit that level
142
      - don't put anything for the BGL level
143
      - if you want all locks at a level use locking.ALL_SET as a value
144

145
    If you need to share locks (rather than acquire them exclusively) at one
146
    level you can modify self.share_locks, setting a true value (usually 1) for
147
    that level. By default locks are not shared.
148

149
    Examples::
150

151
      # Acquire all nodes and one instance
152
      self.needed_locks = {
153
        locking.LEVEL_NODE: locking.ALL_SET,
154
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
155
      }
156
      # Acquire just two nodes
157
      self.needed_locks = {
158
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
159
      }
160
      # Acquire no locks
161
      self.needed_locks = {} # No, you can't leave it to the default value None
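      # Share the node locks instead of acquiring them exclusively
      # (illustrative sketch; see e.g. LUSetClusterParams.ExpandNames below)
      self.share_locks[locking.LEVEL_NODE] = 1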
162

163
    """
164
    # The implementation of this method is mandatory only if the new LU is
165
    # concurrent, so that old LUs don't need to be changed all at the same
166
    # time.
167
    if self.REQ_BGL:
168
      self.needed_locks = {} # Exclusive LUs don't need locks.
169
    else:
170
      raise NotImplementedError
171

    
172
  def DeclareLocks(self, level):
173
    """Declare LU locking needs for a level
174

175
    While most LUs can just declare their locking needs at ExpandNames time,
176
    sometimes there's the need to calculate some locks after having acquired
177
    the ones before. This function is called just before acquiring locks at a
178
    particular level, but after acquiring the ones at lower levels, and permits
179
    such calculations. It can be used to modify self.needed_locks, and by
180
    default it does nothing.
181

182
    This function is only called if you have something already set in
183
    self.needed_locks for the level.
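
    For illustration (a sketch only; LUs below combine this with
    _LockInstancesNodes), an override could look like::

      def DeclareLocks(self, level):
        if level == locking.LEVEL_NODE:
          self._LockInstancesNodes()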
184

185
    @param level: Locking level which is going to be locked
186
    @type level: member of ganeti.locking.LEVELS
187

188
    """
189

    
190
  def CheckPrereq(self):
191
    """Check prerequisites for this LU.
192

193
    This method should check that the prerequisites for the execution
194
    of this LU are fulfilled. It can do internode communication, but
195
    it should be idempotent - no cluster or system changes are
196
    allowed.
197

198
    The method should raise errors.OpPrereqError in case something is
199
    not fulfilled. Its return value is ignored.
200

201
    This method should also update all the parameters of the opcode to
202
    their canonical form if it hasn't been done by ExpandNames before.
203

204
    """
205
    raise NotImplementedError
206

    
207
  def Exec(self, feedback_fn):
208
    """Execute the LU.
209

210
    This method should implement the actual work. It should raise
211
    errors.OpExecError for failures that are somewhat dealt with in
212
    code, or expected.
213

214
    """
215
    raise NotImplementedError
216

    
217
  def BuildHooksEnv(self):
218
    """Build hooks environment for this LU.
219

220
    This method should return a three-element tuple consisting of: a dict
221
    containing the environment that will be used for running the
222
    specific hook for this LU, a list of node names on which the hook
223
    should run before the execution, and a list of node names on which
224
    the hook should run after the execution.
225

226
    The keys of the dict must not have the 'GANETI_' prefix, as this will
227
    be handled in the hooks runner. Also note additional keys will be
228
    added by the hooks runner. If the LU doesn't define any
229
    environment, an empty dict (and not None) should be returned.
230

231
    If there are no nodes to return, an empty list (and not None) should be
    used.
232

233
    Note that if the HPATH for a LU class is None, this function will
234
    not be called.
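
    For illustration (a sketch; LURenameCluster.BuildHooksEnv below is a
    complete example), a minimal implementation could be::

      env = {"OP_TARGET": self.cfg.GetClusterName()}
      mn = self.cfg.GetMasterNode()
      return env, [mn], [mn]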
235

236
    """
237
    raise NotImplementedError
238

    
239
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
240
    """Notify the LU about the results of its hooks.
241

242
    This method is called every time a hooks phase is executed, and notifies
243
    the Logical Unit about the hooks' result. The LU can then use it to alter
244
    its result based on the hooks.  By default the method does nothing and the
245
    previous result is passed back unchanged but any LU can define it if it
246
    wants to use the local cluster hook-scripts somehow.
247

248
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
249
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
250
    @param hook_results: the results of the multi-node hooks rpc call
251
    @param feedback_fn: function used to send feedback back to the caller
252
    @param lu_result: the previous Exec result this LU had, or None
253
        in the PRE phase
254
    @return: the new Exec result, based on the previous result
255
        and hook results
256

257
    """
258
    return lu_result
259

    
260
  def _ExpandAndLockInstance(self):
261
    """Helper function to expand and lock an instance.
262

263
    Many LUs that work on an instance take its name in self.op.instance_name
264
    and need to expand it and then declare the expanded name for locking. This
265
    function does it, and then updates self.op.instance_name to the expanded
266
    name. It also initializes needed_locks as a dict, if this hasn't been done
267
    before.
268

269
    """
270
    if self.needed_locks is None:
271
      self.needed_locks = {}
272
    else:
273
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
274
        "_ExpandAndLockInstance called with instance-level locks set"
275
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
276
    if expanded_name is None:
277
      raise errors.OpPrereqError("Instance '%s' not known" %
278
                                  self.op.instance_name)
279
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
280
    self.op.instance_name = expanded_name
281

    
282
  def _LockInstancesNodes(self, primary_only=False):
283
    """Helper function to declare instances' nodes for locking.
284

285
    This function should be called after locking one or more instances to lock
286
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
287
    with all primary or secondary nodes for instances already locked and
288
    present in self.needed_locks[locking.LEVEL_INSTANCE].
289

290
    It should be called from DeclareLocks, and for safety only works if
291
    self.recalculate_locks[locking.LEVEL_NODE] is set.
292

293
    In the future it may grow parameters to just lock some instance's nodes, or
294
    to just lock primaries or secondary nodes, if needed.
295

296
    It should be called in DeclareLocks in a way similar to::
297

298
      if level == locking.LEVEL_NODE:
299
        self._LockInstancesNodes()
300

301
    @type primary_only: boolean
302
    @param primary_only: only lock primary nodes of locked instances
303

304
    """
305
    assert locking.LEVEL_NODE in self.recalculate_locks, \
306
      "_LockInstancesNodes helper function called with no nodes to recalculate"
307

    
308
    # TODO: check if we've really been called with the instance locks held
309

    
310
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
311
    # future we might want to have different behaviors depending on the value
312
    # of self.recalculate_locks[locking.LEVEL_NODE]
313
    wanted_nodes = []
314
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
315
      instance = self.context.cfg.GetInstanceInfo(instance_name)
316
      wanted_nodes.append(instance.primary_node)
317
      if not primary_only:
318
        wanted_nodes.extend(instance.secondary_nodes)
319

    
320
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
321
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
322
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
323
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
324

    
325
    del self.recalculate_locks[locking.LEVEL_NODE]
326

    
327

    
328
class NoHooksLU(LogicalUnit):
329
  """Simple LU which runs no hooks.
330

331
  This LU is intended as a parent for other LogicalUnits which will
332
  run no hooks, in order to reduce duplicate code.
333

334
  """
335
  HPATH = None
336
  HTYPE = None
337

    
338

    
339
def _GetWantedNodes(lu, nodes):
340
  """Returns list of checked and expanded node names.
341

342
  @type lu: L{LogicalUnit}
343
  @param lu: the logical unit on whose behalf we execute
344
  @type nodes: list
345
  @param nodes: list of node names or None for all nodes
346
  @rtype: list
347
  @return: the list of nodes, sorted
348
  @raise errors.OpPrereqError: if the nodes parameter is wrong type
  @raise errors.ProgrammerError: if the nodes parameter is empty
349

350
  """
351
  if not isinstance(nodes, list):
352
    raise errors.OpPrereqError("Invalid argument type 'nodes'")
353

    
354
  if not nodes:
355
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
356
      " non-empty list of nodes whose name is to be expanded.")
357

    
358
  wanted = []
359
  for name in nodes:
360
    node = lu.cfg.ExpandNodeName(name)
361
    if node is None:
362
      raise errors.OpPrereqError("No such node name '%s'" % name)
363
    wanted.append(node)
364

    
365
  return utils.NiceSort(wanted)
366

    
367

    
368
def _GetWantedInstances(lu, instances):
369
  """Returns list of checked and expanded instance names.
370

371
  @type lu: L{LogicalUnit}
372
  @param lu: the logical unit on whose behalf we execute
373
  @type instances: list
374
  @param instances: list of instance names or None for all instances
375
  @rtype: list
376
  @return: the list of instances, sorted
377
  @raise errors.OpPrereqError: if the instances parameter is wrong type
378
  @raise errors.OpPrereqError: if any of the passed instances is not found
379

380
  """
381
  if not isinstance(instances, list):
382
    raise errors.OpPrereqError("Invalid argument type 'instances'")
383

    
384
  if instances:
385
    wanted = []
386

    
387
    for name in instances:
388
      instance = lu.cfg.ExpandInstanceName(name)
389
      if instance is None:
390
        raise errors.OpPrereqError("No such instance name '%s'" % name)
391
      wanted.append(instance)
392

    
393
  else:
394
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
395
  return wanted
396

    
397

    
398
def _CheckOutputFields(static, dynamic, selected):
399
  """Checks whether all selected fields are valid.
400

401
  @type static: L{utils.FieldSet}
402
  @param static: static fields set
403
  @type dynamic: L{utils.FieldSet}
404
  @param dynamic: dynamic fields set
405

406
  """
407
  f = utils.FieldSet()
408
  f.Extend(static)
409
  f.Extend(dynamic)
410

    
411
  delta = f.NonMatching(selected)
412
  if delta:
413
    raise errors.OpPrereqError("Unknown output fields selected: %s"
414
                               % ",".join(delta))
415

    
416

    
417
def _CheckBooleanOpField(op, name):
418
  """Validates boolean opcode parameters.
419

420
  This will ensure that an opcode parameter is either a boolean value,
421
  or None (but that it always exists).
422

423
  """
424
  val = getattr(op, name, None)
425
  if not (val is None or isinstance(val, bool)):
426
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
427
                               (name, str(val)))
428
  setattr(op, name, val)
429

    
430

    
431
def _CheckNodeOnline(lu, node):
432
  """Ensure that a given node is online.
433

434
  @param lu: the LU on behalf of which we make the check
435
  @param node: the node to check
436
  @raise errors.OpPrereqError: if the node is offline
437

438
  """
439
  if lu.cfg.GetNodeInfo(node).offline:
440
    raise errors.OpPrereqError("Can't use offline node %s" % node)
441

    
442

    
443
def _CheckNodeNotDrained(lu, node):
444
  """Ensure that a given node is not drained.
445

446
  @param lu: the LU on behalf of which we make the check
447
  @param node: the node to check
448
  @raise errors.OpPrereqError: if the node is drained
449

450
  """
451
  if lu.cfg.GetNodeInfo(node).drained:
452
    raise errors.OpPrereqError("Can't use drained node %s" % node)
453

    
454

    
455
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
456
                          memory, vcpus, nics, disk_template, disks):
457
  """Builds instance related env variables for hooks
458

459
  This builds the hook environment from individual variables.
460

461
  @type name: string
462
  @param name: the name of the instance
463
  @type primary_node: string
464
  @param primary_node: the name of the instance's primary node
465
  @type secondary_nodes: list
466
  @param secondary_nodes: list of secondary nodes as strings
467
  @type os_type: string
468
  @param os_type: the name of the instance's OS
469
  @type status: boolean
470
  @param status: the should_run status of the instance
471
  @type memory: string
472
  @param memory: the memory size of the instance
473
  @type vcpus: string
474
  @param vcpus: the count of VCPUs the instance has
475
  @type nics: list
476
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
478
  @type disk_template: string
479
  @param disk_template: the disk template of the instance
480
  @type disks: list
481
  @param disks: the list of (size, mode) pairs
482
  @rtype: dict
483
  @return: the hook environment for this instance
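
  For illustration (a sketch derived from the code below; values elided), a
  single-NIC, single-disk instance produces keys such as::

    OP_TARGET, INSTANCE_NAME, INSTANCE_PRIMARY, INSTANCE_SECONDARIES,
    INSTANCE_OS_TYPE, INSTANCE_STATUS, INSTANCE_MEMORY, INSTANCE_VCPUS,
    INSTANCE_DISK_TEMPLATE, INSTANCE_NIC_COUNT, INSTANCE_NIC0_IP,
    INSTANCE_NIC0_MAC, INSTANCE_NIC0_MODE, INSTANCE_NIC0_LINK,
    INSTANCE_DISK_COUNT, INSTANCE_DISK0_SIZE, INSTANCE_DISK0_MODE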
484

485
  """
486
  if status:
487
    str_status = "up"
488
  else:
489
    str_status = "down"
490
  env = {
491
    "OP_TARGET": name,
492
    "INSTANCE_NAME": name,
493
    "INSTANCE_PRIMARY": primary_node,
494
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
495
    "INSTANCE_OS_TYPE": os_type,
496
    "INSTANCE_STATUS": str_status,
497
    "INSTANCE_MEMORY": memory,
498
    "INSTANCE_VCPUS": vcpus,
499
    "INSTANCE_DISK_TEMPLATE": disk_template,
500
  }
501

    
502
  if nics:
503
    nic_count = len(nics)
504
    for idx, (ip, mac, mode, link) in enumerate(nics):
505
      if ip is None:
506
        ip = ""
507
      env["INSTANCE_NIC%d_IP" % idx] = ip
508
      env["INSTANCE_NIC%d_MAC" % idx] = mac
509
      env["INSTANCE_NIC%d_MODE" % idx] = mode
510
      env["INSTANCE_NIC%d_LINK" % idx] = link
511
      if mode == constants.NIC_MODE_BRIDGED:
512
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
513
  else:
514
    nic_count = 0
515

    
516
  env["INSTANCE_NIC_COUNT"] = nic_count
517

    
518
  if disks:
519
    disk_count = len(disks)
520
    for idx, (size, mode) in enumerate(disks):
521
      env["INSTANCE_DISK%d_SIZE" % idx] = size
522
      env["INSTANCE_DISK%d_MODE" % idx] = mode
523
  else:
524
    disk_count = 0
525

    
526
  env["INSTANCE_DISK_COUNT"] = disk_count
527

    
528
  return env
529

    
530
def _PreBuildNICHooksList(lu, nics):
531
  """Build a list of nic information tuples.
532

533
  This list is suitable to be passed to _BuildInstanceHookEnv.
534

535
  @type lu:  L{LogicalUnit}
536
  @param lu: the logical unit on whose behalf we execute
537
  @type nics: list of L{objects.NIC}
538
  @param nics: list of nics to convert to hooks tuples
539

540
  """
541
  hooks_nics = []
542
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
543
  for nic in nics:
544
    ip = nic.ip
545
    mac = nic.mac
546
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
547
    mode = filled_params[constants.NIC_MODE]
548
    link = filled_params[constants.NIC_LINK]
549
    hooks_nics.append((ip, mac, mode, link))
550
  return hooks_nics
551

    
552
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
553
  """Builds instance related env variables for hooks from an object.
554

555
  @type lu: L{LogicalUnit}
556
  @param lu: the logical unit on whose behalf we execute
557
  @type instance: L{objects.Instance}
558
  @param instance: the instance for which we should build the
559
      environment
560
  @type override: dict
561
  @param override: dictionary with key/values that will override
562
      our values
563
  @rtype: dict
564
  @return: the hook environment dictionary
565

566
  """
567
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
568
  args = {
569
    'name': instance.name,
570
    'primary_node': instance.primary_node,
571
    'secondary_nodes': instance.secondary_nodes,
572
    'os_type': instance.os,
573
    'status': instance.admin_up,
574
    'memory': bep[constants.BE_MEMORY],
575
    'vcpus': bep[constants.BE_VCPUS],
576
    'nics': _PreBuildNICHooksList(lu, instance.nics),
577
    'disk_template': instance.disk_template,
578
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
579
  }
580
  if override:
581
    args.update(override)
582
  return _BuildInstanceHookEnv(**args)
583

    
584

    
585
def _AdjustCandidatePool(lu):
586
  """Adjust the candidate pool after node operations.
587

588
  """
589
  mod_list = lu.cfg.MaintainCandidatePool()
590
  if mod_list:
591
    lu.LogInfo("Promoted nodes to master candidate role: %s",
592
               ", ".join(node.name for node in mod_list))
593
    for name in mod_list:
594
      lu.context.ReaddNode(name)
595
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
596
  if mc_now > mc_max:
597
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
598
               (mc_now, mc_max))
599

    
600

    
601
def _CheckNicsBridgesExist(lu, target_nics, target_node,
602
                               profile=constants.PP_DEFAULT):
603
  """Check that the brigdes needed by a list of nics exist.
604

605
  """
606
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
607
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
608
                for nic in target_nics]
609
  brlist = [params[constants.NIC_LINK] for params in paramslist
610
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
611
  if brlist:
612
    result = lu.rpc.call_bridges_exist(target_node, brlist)
613
    msg = result.RemoteFailMsg()
614
    if msg:
615
      raise errors.OpPrereqError("Error checking bridges on destination node"
616
                                 " '%s': %s" % (target_node, msg))
617

    
618

    
619
def _CheckInstanceBridgesExist(lu, instance, node=None):
620
  """Check that the brigdes needed by an instance exist.
621

622
  """
623
  if node is None:
624
    node = instance.primary_node
625
  _CheckNicsBridgesExist(lu, instance.nics, node)
626

    
627

    
628
class LUDestroyCluster(NoHooksLU):
629
  """Logical unit for destroying the cluster.
630

631
  """
632
  _OP_REQP = []
633

    
634
  def CheckPrereq(self):
635
    """Check prerequisites.
636

637
    This checks whether the cluster is empty.
638

639
    Any errors are signalled by raising errors.OpPrereqError.
640

641
    """
642
    master = self.cfg.GetMasterNode()
643

    
644
    nodelist = self.cfg.GetNodeList()
645
    if len(nodelist) != 1 or nodelist[0] != master:
646
      raise errors.OpPrereqError("There are still %d node(s) in"
647
                                 " this cluster." % (len(nodelist) - 1))
648
    instancelist = self.cfg.GetInstanceList()
649
    if instancelist:
650
      raise errors.OpPrereqError("There are still %d instance(s) in"
651
                                 " this cluster." % len(instancelist))
652

    
653
  def Exec(self, feedback_fn):
654
    """Destroys the cluster.
655

656
    """
657
    master = self.cfg.GetMasterNode()
658
    result = self.rpc.call_node_stop_master(master, False)
659
    msg = result.RemoteFailMsg()
660
    if msg:
661
      raise errors.OpExecError("Could not disable the master role: %s" % msg)
662
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
663
    utils.CreateBackup(priv_key)
664
    utils.CreateBackup(pub_key)
665
    return master
666

    
667

    
668
class LUVerifyCluster(LogicalUnit):
669
  """Verifies the cluster status.
670

671
  """
672
  HPATH = "cluster-verify"
673
  HTYPE = constants.HTYPE_CLUSTER
674
  _OP_REQP = ["skip_checks"]
675
  REQ_BGL = False
676

    
677
  def ExpandNames(self):
678
    self.needed_locks = {
679
      locking.LEVEL_NODE: locking.ALL_SET,
680
      locking.LEVEL_INSTANCE: locking.ALL_SET,
681
    }
682
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
683

    
684
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
685
                  node_result, feedback_fn, master_files,
686
                  drbd_map, vg_name):
687
    """Run multiple tests against a node.
688

689
    Test list:
690

691
      - compares ganeti version
692
      - checks vg existence and size > 20G
693
      - checks config file checksum
694
      - checks ssh to other nodes
695

696
    @type nodeinfo: L{objects.Node}
697
    @param nodeinfo: the node to check
698
    @param file_list: required list of files
699
    @param local_cksum: dictionary of local files and their checksums
700
    @param node_result: the results from the node
701
    @param feedback_fn: function used to accumulate results
702
    @param master_files: list of files that only masters should have
703
    @param drbd_map: the used DRBD minors for this node, in the form of
        minor: (instance, must_exist), which correspond to instances and
        their running status
706
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
707

708
    """
709
    node = nodeinfo.name
710

    
711
    # main result, node_result should be a non-empty dict
712
    if not node_result or not isinstance(node_result, dict):
713
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
714
      return True
715

    
716
    # compares ganeti version
717
    local_version = constants.PROTOCOL_VERSION
718
    remote_version = node_result.get('version', None)
719
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
720
            len(remote_version) == 2):
721
      feedback_fn("  - ERROR: connection to %s failed" % (node))
722
      return True
723

    
724
    if local_version != remote_version[0]:
725
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
726
                  " node %s %s" % (local_version, node, remote_version[0]))
727
      return True
728

    
729
    # node seems compatible, we can actually try to look into its results
730

    
731
    bad = False
732

    
733
    # full package version
734
    if constants.RELEASE_VERSION != remote_version[1]:
735
      feedback_fn("  - WARNING: software version mismatch: master %s,"
736
                  " node %s %s" %
737
                  (constants.RELEASE_VERSION, node, remote_version[1]))
738

    
739
    # checks vg existence and size > 20G
740
    if vg_name is not None:
741
      vglist = node_result.get(constants.NV_VGLIST, None)
742
      if not vglist:
743
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
744
                        (node,))
745
        bad = True
746
      else:
747
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
748
                                              constants.MIN_VG_SIZE)
749
        if vgstatus:
750
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
751
          bad = True
752

    
753
    # checks config file checksum
754

    
755
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
756
    if not isinstance(remote_cksum, dict):
757
      bad = True
758
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
759
    else:
760
      for file_name in file_list:
761
        node_is_mc = nodeinfo.master_candidate
762
        must_have_file = file_name not in master_files
763
        if file_name not in remote_cksum:
764
          if node_is_mc or must_have_file:
765
            bad = True
766
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
767
        elif remote_cksum[file_name] != local_cksum[file_name]:
768
          if node_is_mc or must_have_file:
769
            bad = True
770
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
771
          else:
772
            # not candidate and this is not a must-have file
773
            bad = True
774
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
775
                        " '%s'" % file_name)
776
        else:
777
          # all good, except non-master/non-must have combination
778
          if not node_is_mc and not must_have_file:
779
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
780
                        " candidates" % file_name)
781

    
782
    # checks ssh to any
783

    
784
    if constants.NV_NODELIST not in node_result:
785
      bad = True
786
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
787
    else:
788
      if node_result[constants.NV_NODELIST]:
789
        bad = True
790
        for node in node_result[constants.NV_NODELIST]:
791
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
792
                          (node, node_result[constants.NV_NODELIST][node]))
793

    
794
    if constants.NV_NODENETTEST not in node_result:
795
      bad = True
796
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
797
    else:
798
      if node_result[constants.NV_NODENETTEST]:
799
        bad = True
800
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
801
        for node in nlist:
802
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
803
                          (node, node_result[constants.NV_NODENETTEST][node]))
804

    
805
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
806
    if isinstance(hyp_result, dict):
807
      for hv_name, hv_result in hyp_result.iteritems():
808
        if hv_result is not None:
809
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
810
                      (hv_name, hv_result))
811

    
812
    # check used drbd list
813
    if vg_name is not None:
814
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
815
      if not isinstance(used_minors, (tuple, list)):
816
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
817
                    str(used_minors))
818
      else:
819
        for minor, (iname, must_exist) in drbd_map.items():
820
          if minor not in used_minors and must_exist:
821
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
822
                        " not active" % (minor, iname))
823
            bad = True
824
        for minor in used_minors:
825
          if minor not in drbd_map:
826
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
827
                        minor)
828
            bad = True
829

    
830
    return bad
831

    
832
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
833
                      node_instance, feedback_fn, n_offline):
834
    """Verify an instance.
835

836
    This function checks to see if the required block devices are
837
    available on the instance's node.
838

839
    """
840
    bad = False
841

    
842
    node_current = instanceconfig.primary_node
843

    
844
    node_vol_should = {}
845
    instanceconfig.MapLVsByNode(node_vol_should)
846

    
847
    for node in node_vol_should:
848
      if node in n_offline:
849
        # ignore missing volumes on offline nodes
850
        continue
851
      for volume in node_vol_should[node]:
852
        if node not in node_vol_is or volume not in node_vol_is[node]:
853
          feedback_fn("  - ERROR: volume %s missing on node %s" %
854
                          (volume, node))
855
          bad = True
856

    
857
    if instanceconfig.admin_up:
858
      if ((node_current not in node_instance or
859
          not instance in node_instance[node_current]) and
860
          node_current not in n_offline):
861
        feedback_fn("  - ERROR: instance %s not running on node %s" %
862
                        (instance, node_current))
863
        bad = True
864

    
865
    for node in node_instance:
866
      if (not node == node_current):
867
        if instance in node_instance[node]:
868
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
869
                          (instance, node))
870
          bad = True
871

    
872
    return bad
873

    
874
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
875
    """Verify if there are any unknown volumes in the cluster.
876

877
    The .os, .swap and backup volumes are ignored. All other volumes are
878
    reported as unknown.
879

880
    """
881
    bad = False
882

    
883
    for node in node_vol_is:
884
      for volume in node_vol_is[node]:
885
        if node not in node_vol_should or volume not in node_vol_should[node]:
886
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
887
                      (volume, node))
888
          bad = True
889
    return bad
890

    
891
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
892
    """Verify the list of running instances.
893

894
    This checks what instances are running but unknown to the cluster.
895

896
    """
897
    bad = False
898
    for node in node_instance:
899
      for runninginstance in node_instance[node]:
900
        if runninginstance not in instancelist:
901
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
902
                          (runninginstance, node))
903
          bad = True
904
    return bad
905

    
906
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
907
    """Verify N+1 Memory Resilience.
908

909
    Check that if one single node dies we can still start all the instances it
910
    was primary for.
911

912
    """
913
    bad = False
914

    
915
    for node, nodeinfo in node_info.iteritems():
916
      # This code checks that every node which is now listed as secondary has
917
      # enough memory to host all instances it is supposed to, should a single
918
      # other node in the cluster fail.
919
      # FIXME: not ready for failover to an arbitrary node
920
      # FIXME: does not support file-backed instances
921
      # WARNING: we currently take into account down instances as well as up
922
      # ones, considering that even if they're down someone might want to start
923
      # them even in the event of a node failure.
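      # Illustrative example (hypothetical numbers): if this node is the
      # secondary for two auto-balanced instances whose primary is node A,
      # each with BE_MEMORY of 2048 MB, then needed_mem for prinode A is
      # 4096 MB, and the check below fails only if this node's mfree is
      # smaller than that sum.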
924
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
925
        needed_mem = 0
926
        for instance in instances:
927
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
928
          if bep[constants.BE_AUTO_BALANCE]:
929
            needed_mem += bep[constants.BE_MEMORY]
930
        if nodeinfo['mfree'] < needed_mem:
931
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
932
                      " failovers should node %s fail" % (node, prinode))
933
          bad = True
934
    return bad
935

    
936
  def CheckPrereq(self):
937
    """Check prerequisites.
938

939
    Transform the list of checks we're going to skip into a set and check that
940
    all its members are valid.
941

942
    """
943
    self.skip_set = frozenset(self.op.skip_checks)
944
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
945
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
946

    
947
  def BuildHooksEnv(self):
948
    """Build hooks env.
949

950
    Cluster-Verify hooks just run in the post phase and their failure is
    logged in the verify output, making the verification fail.
952

953
    """
954
    all_nodes = self.cfg.GetNodeList()
955
    env = {
956
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
957
      }
958
    for node in self.cfg.GetAllNodesInfo().values():
959
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
960

    
961
    return env, [], all_nodes
962

    
963
  def Exec(self, feedback_fn):
964
    """Verify integrity of cluster, performing various test on nodes.
965

966
    """
967
    bad = False
968
    feedback_fn("* Verifying global settings")
969
    for msg in self.cfg.VerifyConfig():
970
      feedback_fn("  - ERROR: %s" % msg)
971

    
972
    vg_name = self.cfg.GetVGName()
973
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
974
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
975
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
976
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
977
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
978
                        for iname in instancelist)
979
    i_non_redundant = [] # Non redundant instances
980
    i_non_a_balanced = [] # Non auto-balanced instances
981
    n_offline = [] # List of offline nodes
982
    n_drained = [] # List of nodes being drained
983
    node_volume = {}
984
    node_instance = {}
985
    node_info = {}
986
    instance_cfg = {}
987

    
988
    # FIXME: verify OS list
989
    # do local checksums
990
    master_files = [constants.CLUSTER_CONF_FILE]
991

    
992
    file_names = ssconf.SimpleStore().GetFileList()
993
    file_names.append(constants.SSL_CERT_FILE)
994
    file_names.append(constants.RAPI_CERT_FILE)
995
    file_names.extend(master_files)
996

    
997
    local_checksums = utils.FingerprintFiles(file_names)
998

    
999
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1000
    node_verify_param = {
1001
      constants.NV_FILELIST: file_names,
1002
      constants.NV_NODELIST: [node.name for node in nodeinfo
1003
                              if not node.offline],
1004
      constants.NV_HYPERVISOR: hypervisors,
1005
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1006
                                  node.secondary_ip) for node in nodeinfo
1007
                                 if not node.offline],
1008
      constants.NV_INSTANCELIST: hypervisors,
1009
      constants.NV_VERSION: None,
1010
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1011
      }
1012
    if vg_name is not None:
1013
      node_verify_param[constants.NV_VGLIST] = None
1014
      node_verify_param[constants.NV_LVLIST] = vg_name
1015
      node_verify_param[constants.NV_DRBDLIST] = None
1016
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1017
                                           self.cfg.GetClusterName())
1018

    
1019
    cluster = self.cfg.GetClusterInfo()
1020
    master_node = self.cfg.GetMasterNode()
1021
    all_drbd_map = self.cfg.ComputeDRBDMap()
1022

    
1023
    for node_i in nodeinfo:
1024
      node = node_i.name
1025

    
1026
      if node_i.offline:
1027
        feedback_fn("* Skipping offline node %s" % (node,))
1028
        n_offline.append(node)
1029
        continue
1030

    
1031
      if node == master_node:
1032
        ntype = "master"
1033
      elif node_i.master_candidate:
1034
        ntype = "master candidate"
1035
      elif node_i.drained:
1036
        ntype = "drained"
1037
        n_drained.append(node)
1038
      else:
1039
        ntype = "regular"
1040
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1041

    
1042
      msg = all_nvinfo[node].RemoteFailMsg()
1043
      if msg:
1044
        feedback_fn("  - ERROR: while contacting node %s: %s" % (node, msg))
1045
        bad = True
1046
        continue
1047

    
1048
      nresult = all_nvinfo[node].payload
1049
      node_drbd = {}
1050
      for minor, instance in all_drbd_map[node].items():
1051
        if instance not in instanceinfo:
1052
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
1053
                      instance)
1054
          # ghost instance should not be running, but otherwise we
1055
          # don't give double warnings (both ghost instance and
1056
          # unallocated minor in use)
1057
          node_drbd[minor] = (instance, False)
1058
        else:
1059
          instance = instanceinfo[instance]
1060
          node_drbd[minor] = (instance.name, instance.admin_up)
1061
      result = self._VerifyNode(node_i, file_names, local_checksums,
1062
                                nresult, feedback_fn, master_files,
1063
                                node_drbd, vg_name)
1064
      bad = bad or result
1065

    
1066
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1067
      if vg_name is None:
1068
        node_volume[node] = {}
1069
      elif isinstance(lvdata, basestring):
1070
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
1071
                    (node, utils.SafeEncode(lvdata)))
1072
        bad = True
1073
        node_volume[node] = {}
1074
      elif not isinstance(lvdata, dict):
1075
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1076
        bad = True
1077
        continue
1078
      else:
1079
        node_volume[node] = lvdata
1080

    
1081
      # node_instance
1082
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1083
      if not isinstance(idata, list):
1084
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1085
                    (node,))
1086
        bad = True
1087
        continue
1088

    
1089
      node_instance[node] = idata
1090

    
1091
      # node_info
1092
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1093
      if not isinstance(nodeinfo, dict):
1094
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1095
        bad = True
1096
        continue
1097

    
1098
      try:
1099
        node_info[node] = {
1100
          "mfree": int(nodeinfo['memory_free']),
1101
          "pinst": [],
1102
          "sinst": [],
1103
          # dictionary holding all instances this node is secondary for,
1104
          # grouped by their primary node. Each key is a cluster node, and each
1105
          # value is a list of instances which have the key as primary and the
1106
          # current node as secondary.  this is handy to calculate N+1 memory
1107
          # availability if you can only failover from a primary to its
1108
          # secondary.
1109
          "sinst-by-pnode": {},
1110
        }
1111
        # FIXME: devise a free space model for file based instances as well
1112
        if vg_name is not None:
1113
          if (constants.NV_VGLIST not in nresult or
1114
              vg_name not in nresult[constants.NV_VGLIST]):
1115
            feedback_fn("  - ERROR: node %s didn't return data for the"
1116
                        " volume group '%s' - it is either missing or broken" %
1117
                        (node, vg_name))
1118
            bad = True
1119
            continue
1120
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1121
      except (ValueError, KeyError):
1122
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
1123
                    " from node %s" % (node,))
1124
        bad = True
1125
        continue
1126

    
1127
    node_vol_should = {}
1128

    
1129
    for instance in instancelist:
1130
      feedback_fn("* Verifying instance %s" % instance)
1131
      inst_config = instanceinfo[instance]
1132
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1133
                                     node_instance, feedback_fn, n_offline)
1134
      bad = bad or result
1135
      inst_nodes_offline = []
1136

    
1137
      inst_config.MapLVsByNode(node_vol_should)
1138

    
1139
      instance_cfg[instance] = inst_config
1140

    
1141
      pnode = inst_config.primary_node
1142
      if pnode in node_info:
1143
        node_info[pnode]['pinst'].append(instance)
1144
      elif pnode not in n_offline:
1145
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1146
                    " %s failed" % (instance, pnode))
1147
        bad = True
1148

    
1149
      if pnode in n_offline:
1150
        inst_nodes_offline.append(pnode)
1151

    
1152
      # If the instance is non-redundant we cannot survive losing its primary
1153
      # node, so we are not N+1 compliant. On the other hand we have no disk
1154
      # templates with more than one secondary so that situation is not well
1155
      # supported either.
1156
      # FIXME: does not support file-backed instances
1157
      if len(inst_config.secondary_nodes) == 0:
1158
        i_non_redundant.append(instance)
1159
      elif len(inst_config.secondary_nodes) > 1:
1160
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1161
                    % instance)
1162

    
1163
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1164
        i_non_a_balanced.append(instance)
1165

    
1166
      for snode in inst_config.secondary_nodes:
1167
        if snode in node_info:
1168
          node_info[snode]['sinst'].append(instance)
1169
          if pnode not in node_info[snode]['sinst-by-pnode']:
1170
            node_info[snode]['sinst-by-pnode'][pnode] = []
1171
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1172
        elif snode not in n_offline:
1173
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1174
                      " %s failed" % (instance, snode))
1175
          bad = True
1176
        if snode in n_offline:
1177
          inst_nodes_offline.append(snode)
1178

    
1179
      if inst_nodes_offline:
1180
        # warn that the instance lives on offline nodes, and set bad=True
1181
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1182
                    ", ".join(inst_nodes_offline))
1183
        bad = True
1184

    
1185
    feedback_fn("* Verifying orphan volumes")
1186
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1187
                                       feedback_fn)
1188
    bad = bad or result
1189

    
1190
    feedback_fn("* Verifying remaining instances")
1191
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1192
                                         feedback_fn)
1193
    bad = bad or result
1194

    
1195
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1196
      feedback_fn("* Verifying N+1 Memory redundancy")
1197
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1198
      bad = bad or result
1199

    
1200
    feedback_fn("* Other Notes")
1201
    if i_non_redundant:
1202
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1203
                  % len(i_non_redundant))
1204

    
1205
    if i_non_a_balanced:
1206
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1207
                  % len(i_non_a_balanced))
1208

    
1209
    if n_offline:
1210
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1211

    
1212
    if n_drained:
1213
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1214

    
1215
    return not bad
1216

    
1217
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1218
    """Analize the post-hooks' result
1219

1220
    This method analyses the hook result, handles it, and sends some
1221
    nicely-formatted feedback back to the user.
1222

1223
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1224
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1225
    @param hooks_results: the results of the multi-node hooks rpc call
1226
    @param feedback_fn: function used to send feedback back to the caller
1227
    @param lu_result: previous Exec result
1228
    @return: the new Exec result, based on the previous result
1229
        and hook results
1230

1231
    """
1232
    # We only really run POST phase hooks, and are only interested in
1233
    # their results
1234
    if phase == constants.HOOKS_PHASE_POST:
1235
      # Used to change hooks' output to proper indentation
1236
      indent_re = re.compile('^', re.M)
1237
      feedback_fn("* Hooks Results")
1238
      if not hooks_results:
1239
        feedback_fn("  - ERROR: general communication failure")
1240
        lu_result = 1
1241
      else:
1242
        for node_name in hooks_results:
1243
          show_node_header = True
1244
          res = hooks_results[node_name]
1245
          if res.failed or res.data is False or not isinstance(res.data, list):
1246
            if res.offline:
1247
              # no need to warn or set fail return value
1248
              continue
1249
            feedback_fn("    Communication failure in hooks execution")
1250
            lu_result = 1
1251
            continue
1252
          for script, hkr, output in res.data:
1253
            if hkr == constants.HKR_FAIL:
1254
              # The node header is only shown once, if there are
1255
              # failing hooks on that node
1256
              if show_node_header:
1257
                feedback_fn("  Node %s:" % node_name)
1258
                show_node_header = False
1259
              feedback_fn("    ERROR: Script %s failed, output:" % script)
1260
              output = indent_re.sub('      ', output)
1261
              feedback_fn("%s" % output)
1262
              lu_result = 1
1263

    
1264
      return lu_result
1265

    
1266

    
1267
class LUVerifyDisks(NoHooksLU):
1268
  """Verifies the cluster disks status.
1269

1270
  """
1271
  _OP_REQP = []
1272
  REQ_BGL = False
1273

    
1274
  def ExpandNames(self):
1275
    self.needed_locks = {
1276
      locking.LEVEL_NODE: locking.ALL_SET,
1277
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1278
    }
1279
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1280

    
1281
  def CheckPrereq(self):
1282
    """Check prerequisites.
1283

1284
    This has no prerequisites.
1285

1286
    """
1287
    pass
1288

    
1289
  def Exec(self, feedback_fn):
1290
    """Verify integrity of cluster disks.
1291

1292
    @rtype: tuple of three items
1293
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)
1296

1297
    """
1298
    result = res_nodes, res_instances, res_missing = {}, [], {}
1299

    
1300
    vg_name = self.cfg.GetVGName()
1301
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1302
    instances = [self.cfg.GetInstanceInfo(name)
1303
                 for name in self.cfg.GetInstanceList()]
1304

    
1305
    nv_dict = {}
1306
    for inst in instances:
1307
      inst_lvs = {}
1308
      if (not inst.admin_up or
1309
          inst.disk_template not in constants.DTS_NET_MIRROR):
1310
        continue
1311
      inst.MapLVsByNode(inst_lvs)
1312
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1313
      for node, vol_list in inst_lvs.iteritems():
1314
        for vol in vol_list:
1315
          nv_dict[(node, vol)] = inst
1316

    
1317
    if not nv_dict:
1318
      return result
1319

    
1320
    node_lvs = self.rpc.call_volume_list(nodes, vg_name)
1321

    
1322
    to_act = set()
1323
    for node in nodes:
1324
      # node_volume
1325
      node_res = node_lvs[node]
1326
      if node_res.offline:
1327
        continue
1328
      msg = node_res.RemoteFailMsg()
1329
      if msg:
1330
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
1331
        res_nodes[node] = msg
1332
        continue
1333

    
1334
      lvs = node_res.payload
1335
      for lv_name, (_, lv_inactive, lv_online) in lvs.items():
1336
        inst = nv_dict.pop((node, lv_name), None)
1337
        if (not lv_online and inst is not None
1338
            and inst.name not in res_instances):
1339
          res_instances.append(inst.name)
1340

    
1341
    # any leftover items in nv_dict are missing LVs, let's arrange the
1342
    # data better
1343
    for key, inst in nv_dict.iteritems():
1344
      if inst.name not in res_missing:
1345
        res_missing[inst.name] = []
1346
      res_missing[inst.name].append(key)
1347

    
1348
    return result
1349

    
1350

    
1351
class LURenameCluster(LogicalUnit):
1352
  """Rename the cluster.
1353

1354
  """
1355
  HPATH = "cluster-rename"
1356
  HTYPE = constants.HTYPE_CLUSTER
1357
  _OP_REQP = ["name"]
1358

    
1359
  def BuildHooksEnv(self):
1360
    """Build hooks env.
1361

1362
    """
1363
    env = {
1364
      "OP_TARGET": self.cfg.GetClusterName(),
1365
      "NEW_NAME": self.op.name,
1366
      }
1367
    mn = self.cfg.GetMasterNode()
1368
    return env, [mn], [mn]
1369

    
1370
  def CheckPrereq(self):
1371
    """Verify that the passed name is a valid one.
1372

1373
    """
1374
    hostname = utils.HostInfo(self.op.name)
1375

    
1376
    new_name = hostname.name
1377
    self.ip = new_ip = hostname.ip
1378
    old_name = self.cfg.GetClusterName()
1379
    old_ip = self.cfg.GetMasterIP()
1380
    if new_name == old_name and new_ip == old_ip:
1381
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1382
                                 " cluster has changed")
1383
    if new_ip != old_ip:
1384
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1385
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1386
                                   " reachable on the network. Aborting." %
1387
                                   new_ip)
1388

    
1389
    self.op.name = new_name
1390

    
1391
  def Exec(self, feedback_fn):
1392
    """Rename the cluster.
1393

1394
    """
1395
    clustername = self.op.name
1396
    ip = self.ip
1397

    
1398
    # shutdown the master IP
1399
    master = self.cfg.GetMasterNode()
1400
    result = self.rpc.call_node_stop_master(master, False)
1401
    msg = result.RemoteFailMsg()
1402
    if msg:
1403
      raise errors.OpExecError("Could not disable the master role: %s" % msg)
1404

    
1405
    try:
1406
      cluster = self.cfg.GetClusterInfo()
1407
      cluster.cluster_name = clustername
1408
      cluster.master_ip = ip
1409
      self.cfg.Update(cluster)
1410

    
1411
      # update the known hosts file
1412
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
1413
      node_list = self.cfg.GetNodeList()
1414
      try:
1415
        node_list.remove(master)
1416
      except ValueError:
1417
        pass
1418
      result = self.rpc.call_upload_file(node_list,
1419
                                         constants.SSH_KNOWN_HOSTS_FILE)
1420
      for to_node, to_result in result.iteritems():
        msg = to_result.RemoteFailMsg()
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
          self.proc.LogWarning(msg)
1426

    
1427
    finally:
1428
      result = self.rpc.call_node_start_master(master, False)
1429
      msg = result.RemoteFailMsg()
1430
      if msg:
1431
        self.LogWarning("Could not re-enable the master role on"
1432
                        " the master, please restart manually: %s", msg)
1433

    
1434

    
1435
def _RecursiveCheckIfLVMBased(disk):
1436
  """Check if the given disk or its children are lvm-based.
1437

1438
  @type disk: L{objects.Disk}
1439
  @param disk: the disk to check
1440
  @rtype: boolean
1441
  @return: boolean indicating whether a LD_LV dev_type was found or not
1442

1443
  """
1444
  if disk.children:
1445
    for chdisk in disk.children:
1446
      if _RecursiveCheckIfLVMBased(chdisk):
1447
        return True
1448
  return disk.dev_type == constants.LD_LV
1449

    
1450

    
1451
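# Note (illustrative, not part of the original code): the CheckPrereq below
# relies on objects.FillDict(defaults, custom) returning a copy of the
# defaults dict updated with the custom values, so per-opcode parameters
# override the cluster-wide defaults.  For example (made-up values):
#   objects.FillDict({"memory": 128, "vcpus": 1}, {"memory": 256})
#   => {"memory": 256, "vcpus": 1}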
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except (ValueError, TypeError), err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].RemoteFailMsg()
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate params changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = objects.FillDict(
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = objects.FillDict(
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)

    # hypervisor list/parameters
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size

    self.cfg.Update(self.cluster)

    # we want to update nodes after the cluster so that if any errors
    # happen, we have recorded and saved the cluster info
    if self.op.candidate_pool_size is not None:
      _AdjustCandidatePool(self)


def _RedistributeAncillaryFiles(lu, additional_nodes=None):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to

  """
  # 1. Gather target nodes
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  dist_nodes = lu.cfg.GetNodeList()
  if additional_nodes is not None:
    dist_nodes.extend(additional_nodes)
  if myself.name in dist_nodes:
    dist_nodes.remove(myself.name)
  # 2. Gather files to distribute
  dist_files = set([constants.ETC_HOSTS,
                    constants.SSH_KNOWN_HOSTS_FILE,
                    constants.RAPI_CERT_FILE,
                    constants.RAPI_USERS_FILE,
                   ])

  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
  for hv_name in enabled_hypervisors:
    hv_class = hypervisor.GetHypervisor(hv_name)
    dist_files.update(hv_class.GetAncillaryFiles())

  # 3. Perform the files upload
  for fname in dist_files:
    if os.path.exists(fname):
      result = lu.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.items():
        msg = to_result.RemoteFailMsg()
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (fname, to_node, msg))
          lu.proc.LogWarning(msg)


class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo())
    _RedistributeAncillaryFiles(self)


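# Note (descriptive, based on the unpacking in the loop below): each entry of
# the blockdev_getmirrorstatus payload is treated as a
# (perc_done, est_time, is_degraded, ldisk) tuple; perc_done is None once the
# device is fully synced, and the ldisk field is ignored here.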
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disks to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    msg = rstats.RemoteFailMsg()
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


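# Note (descriptive, inferred from the idx selection and docstring below):
# _CheckDiskConsistency indexes the blockdev_find payload positionally, with
# field 5 carrying the is_degraded flag and field 6 the ldisk (local storage)
# status.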
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.RemoteFailMsg()
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      result = result and (not rstats.payload[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].RemoteFailMsg()]
    for node_name, nr in rlist.items():
      if nr.RemoteFailMsg() or not nr.payload:
        continue
      for os_serialized in nr.payload:
        os_obj = objects.OS.FromDict(os_serialized)
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in good_nodes:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.items():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    result = self.rpc.call_node_leave_cluster(node.name)
    msg = result.RemoteFailMsg()
    if msg:
      self.LogWarning("Errors encountered on the remote node while leaving"
                      " the cluster: %s", msg)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    "drained",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in _GetWantedNodes if the
    # list is not empty; an empty list needs no validation
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.RemoteFailMsg() and nodeinfo.payload:
          nodeinfo = nodeinfo.payload
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif field == "drained":
          val = node.drained
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output


class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.RemoteFailMsg()
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = nresult.payload[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


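# Note (descriptive, based on the keyfiles list built in LUAddNode.Exec
# below): the keys are read in the order host DSA private/public, host RSA
# private/public, then the Ganeti run-as user's private/public key (from
# ssh.GetUserFiles), and are passed to call_node_add in that same order.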
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    mc_now, _ = self.cfg.GetMasterCandidateStats()
    master_candidate = mc_now < cp_size

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip,
                                 master_candidate=master_candidate,
                                 offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity
    result = self.rpc.call_version([node])[node]
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Can't get version information from"
                               " node %s: %s" % (node, msg))
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot transfer ssh keys to the"
                               " new node: %s" % msg)

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Failure checking secondary ip"
                                   " on node %s: %s" % (new_node.name, msg))
      if not result.payload:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      msg = result[verifier].RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot communicate with node %s: %s" %
                                 (verifier, msg))
      nl_payload = result[verifier].payload['nodelist']
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
      self.context.AddNode(new_node)


class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      raise errors.OpPrereqError("Please pass at least one modification")
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested modifications against the node's current state.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if ((self.op.master_candidate == False or self.op.offline == True or
         self.op.drained == True) and node.master_candidate):
      # we will demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate, online and not drained")
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name)

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result


class LUPowercycleNode(NoHooksLU):
  """Powercycles a node.

  """
  _OP_REQP = ["node_name", "force"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    if node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set")

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This LU has no prereqs.

    """
    pass

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Failed to schedule the reboot: %s" % msg)
    return result.payload


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
                        for hypervisor in cluster.enabled_hypervisors]),
      "beparams": cluster.beparams,
      "nicparams": cluster.nicparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "file_storage_dir": cluster.file_storage_dir,
      }

    return result


class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: a tuple of (disks_ok, device_info); disks_ok is False if the
      operation failed, and device_info is a list of
      (host, instance_visible_name, node_visible_name) tuples
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        result.payload))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


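# Usage note (illustrative): _StartInstanceDisks is typically called with the
# opcode's force flag, e.g.
#   _StartInstanceDisks(self, instance, self.op.force)
# as done by LUStartupInstance.Exec further below; a true value makes
# secondary-node assembly errors non-fatal via ignore_secondaries.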
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])
  ins_l = ins_l[pnode]
  msg = ins_l.RemoteFailMsg()
  if msg:
    raise errors.OpExecError("Can't contact node %s: %s" % (pnode, msg))

  if instance.name in ins_l.payload:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored;
  otherwise they are reflected in the return value.

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.RemoteFailMsg()
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result


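# Usage note (illustrative): a typical caller checks the primary node before
# starting an instance, e.g.
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)
# as done in LUStartupInstance.CheckPrereq below.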
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  msg = nodeinfo[node].RemoteFailMsg()
  if msg:
    raise errors.OpPrereqError("Can't get data from node %s: %s" % (node, msg))
  free_mem = nodeinfo[node].payload.get('memory_free', None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))


class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra beparams
    self.beparams = getattr(self.op, "beparams", {})
    if self.beparams:
      if not isinstance(self.beparams, dict):
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
                                   " dict" % (type(self.beparams), ))
      # fill the beparams dict
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
      self.op.beparams = self.beparams

    # extra hvparams
    self.hvparams = getattr(self.op, "hvparams", {})
    if self.hvparams:
      if not isinstance(self.hvparams, dict):
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
                                   " dict" % (type(self.hvparams), ))

      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
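      # The effective hypervisor parameters are the cluster defaults for
      # this hypervisor, overridden by the instance's own hvparams and
      # finally by the one-off values passed with this opcode.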
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
                                    instance.hvparams)
      filled_hvp.update(self.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
      self.op.hvparams = self.hvparams

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    msg = remote_info.RemoteFailMsg()
    if msg:
      raise errors.OpPrereqError("Error checking node %s: %s" %
                                 (instance.primary_node, msg))
    if not remote_info.payload: # not running already
      _CheckNodeFreeMemory(self, instance.primary_node,
                           "starting instance %s" % instance.name,
                           bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance,
                                          self.hvparams, self.beparams)
    msg = result.RemoteFailMsg()
    if msg:
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)


class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

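    # Soft and hard reboots are handled by a single instance_reboot call to
    # the primary node; a full reboot is emulated below as a shutdown
    # followed by a disk restart and a fresh start.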
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not reboot instance: %s" % msg)
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance for"
                                 " full reboot: %s" % msg)
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance)
    msg = result.RemoteFailMsg()
    if msg:
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)

    _ShutdownInstanceDisks(self, instance)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    msg = remote_info.RemoteFailMsg()
    if msg:
      raise errors.OpPrereqError("Error checking node %s: %s" %
                                 (instance.primary_node, msg))
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   self.op.pnode)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    msg = remote_info.RemoteFailMsg()
    if msg:
      raise errors.OpPrereqError("Error checking node %s: %s" %
                                 (instance.primary_node, msg))
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")


  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

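    # Live (dynamic) data is only gathered when a non-static field was
    # requested; locks are only acquired if, in addition, the caller asked
    # for locking.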
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed or result.RemoteFailMsg():
          bad_nodes.append(name)
        else:
          if result.payload:
            live_data.update(result.payload)
          # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
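    # Build one output row per instance; the "status" field below combines
    # the configured state (admin_up) with the live state reported by the
    # primary node.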
    for instance in instance_list:
      iout = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output


class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)
    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["MIGRATE_LIVE"] = self.op.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

    if not self.op.cleanup:
      _CheckNodeNotDrained(self, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        msg = nres.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, msg))
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, msg))

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot disconnect disks node %s,"
                                 " error %s" % (node, msg))

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node, msg))

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Can't contact node %s: %s" % (node, msg))

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

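    # Whichever node is not the (possibly updated) primary is demoted back
    # to a plain DRBD secondary before resyncing and reconnecting.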
    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore here errors, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.
