lib/cmdlib.py @ 4a2c0db0

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201,C0302

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

# C0302: since we have waaaay too many lines in this module

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL
import socket
import tempfile
import shutil
import itertools

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd
from ganeti import netutils
from ganeti import query
from ganeti import qlang
from ganeti import opcodes

import ganeti.masterd.instance # pylint: disable-msg=W0611


def _SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


# End types
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.Log = processor.Log # pylint: disable-msg=C0103
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    # Validate opcode parameters and set defaults
    self.op.Validate(True)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer need to worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      pass

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have the 'GANETI_' prefix, as this
    will be handled by the hooks runner. Also note that additional keys
    will be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If no nodes are needed for a phase, an empty list (and not None)
    should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

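  # Illustrative sketch only, not part of the original module: a minimal
  # BuildHooksEnv for a hypothetical cluster-level LU, following the
  # (env, pre-nodes, post-nodes) convention described above:
  #
  #   env = {"OP_TARGET": self.cfg.GetClusterName()}
  #   mn = self.cfg.GetMasterNode()
  #   return env, [mn], [mn]
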
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused-argument and
    # could-be-a-function warnings
    # pylint: disable-msg=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    assert False, "BuildHooksEnv called for NoHooksLUs"


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    pass

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


class _QueryBase:
  """Base for query utility classes.

  """
  #: Attribute holding field definitions
  FIELDS = None

  def __init__(self, names, fields, use_locking):
    """Initializes this class.

    """
    self.names = names
    self.use_locking = use_locking

    self.query = query.Query(self.FIELDS, fields)
    self.requested_data = self.query.RequestedData()

    self.do_locking = None
    self.wanted = None

  def _GetNames(self, lu, all_names, lock_level):
    """Helper function to determine names asked for in the query.

    """
    if self.do_locking:
      names = lu.acquired_locks[lock_level]
    else:
      names = all_names

    if self.wanted == locking.ALL_SET:
      assert not self.names
      # caller didn't specify names, so ordering is not important
      return utils.NiceSort(names)

    # caller specified names and we must keep the same order
    assert self.names
    assert not self.do_locking or lu.acquired_locks[lock_level]

    missing = set(self.wanted).difference(names)
    if missing:
      raise errors.OpExecError("Some items were removed before retrieving"
                               " their data: %s" % missing)

    # Return expanded names
    return self.wanted

  @classmethod
  def FieldsQuery(cls, fields):
    """Returns list of available fields.

    @return: List of L{objects.QueryFieldDefinition}

    """
    return query.QueryFields(cls.FIELDS, fields)

  def ExpandNames(self, lu):
    """Expand names for this query.

    See L{LogicalUnit.ExpandNames}.

    """
    raise NotImplementedError()

  def DeclareLocks(self, lu, level):
    """Declare locks for this query.

    See L{LogicalUnit.DeclareLocks}.

    """
    raise NotImplementedError()

  def _GetQueryData(self, lu):
    """Collects all data for this query.

    @return: Query data object

    """
    raise NotImplementedError()

  def NewStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return query.GetQueryResponse(self.query, self._GetQueryData(lu))

  def OldStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return self.query.OldStyleQuery(self._GetQueryData(lu))


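# Illustrative sketch only, not part of the original module: in
# _QueryBase._GetNames above, a query with self.wanted == locking.ALL_SET
# returns all known (or all locked) names, nicely sorted, whereas a query
# for explicit names preserves the caller's ordering and raises
# errors.OpExecError if any requested item has disappeared in the meantime.
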
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if nodes:
    return [_ExpandNodeName(lu.cfg, name) for name in nodes]

  return utils.NiceSort(lu.cfg.GetNodeList())


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy


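# Illustrative sketch only, not part of the original module; assuming
# constants.VALUE_DEFAULT == "default", _GetUpdatedParams resolves updates
# like this:
#
#   _GetUpdatedParams({"a": 1, "b": 2}, {"a": "default", "c": 3})
#   => {"b": 2, "c": 3}   # "a" falls back to its default, "c" is added
#
#   _GetUpdatedParams({"a": 1}, {"a": None}, use_none=True)
#   => {}                 # None marks "a" as 'to be deleted'
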
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


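# Illustrative sketch only, not part of the original module: with a static
# field set containing "name" and a dynamic set containing "free_memory",
# selecting ["name", "free_memory"] passes _CheckOutputFields, while
# ["name", "bogus"] raises errors.OpPrereqError("Unknown output fields
# selected: bogus", ...).
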
def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeVmCapable(lu, node):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node does not support the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
  """Ensure that a node has the given secondary ip.

  @type lu: L{LogicalUnit}
  @param lu: the LU on behalf of which we make the check
  @type node: string
  @param node: the node to check
  @type secondary_ip: string
  @param secondary_ip: the ip to check
  @type prereq: boolean
  @param prereq: whether to throw a prerequisite or an execute error
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False

  """
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
  result.Raise("Failure checking secondary ip on node %s" % node,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
  if not result.payload:
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
           " please fix and re-run this command" % secondary_ip)
    if prereq:
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
    else:
      raise errors.OpExecError(msg)


def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instances."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


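# Illustrative sketch only, not part of the original module: assuming the
# cluster knows a node "node1.example.com",
#
#   _ExpandNodeName(cfg, "node1")  => "node1.example.com"
#
# while an unknown name makes _ExpandItemName raise errors.OpPrereqError
# with errors.ECODE_NOENT.
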
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance-related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  cluster = lu.cfg.GetClusterInfo()
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance-related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max by one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


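# Illustrative sketch only, not part of the original module: with
# candidate_pool_size = 10, mc_now = 3 and mc_should = 3, the node being
# added bumps mc_should to min(3 + 1, 10) == 4, so mc_now < mc_should and
# _DecideSelfPromotion returns True (the new node should promote itself).
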
def _CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  variant = objects.OS.GetVariant(name)
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, then the LU's opcode's iallocator slot is filled with the
  cluster-wide default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  iallocator = getattr(lu.op, iallocator_slot, None)

  if node is not None and iallocator is not None:
    raise errors.OpPrereqError("Do not specify both an iallocator and a node.",
                               errors.ECODE_INVAL)
  elif node is None and iallocator is None:
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found."
                                 " Please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator.")


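# Illustrative sketch only, not part of the original module: for a
# hypothetical opcode with slots "iallocator" and "pnode",
#
#   _CheckIAllocatorOrNode(lu, "iallocator", "pnode")
#
# leaves an explicitly given node or iallocator untouched, fills
# lu.op.iallocator from the cluster-wide default when neither is given,
# and rejects specifying both with errors.OpPrereqError.
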
class LUClusterPostInit(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUClusterDestroy(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    return master


def _VerifyCertificate(filename):
  """Verifies a certificate for LUClusterVerify.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable-msg=W0703
    return (LUClusterVerify.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  (errcode, msg) = \
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
                                constants.SSL_CERT_EXPIRATION_ERROR)

  if msg:
    fnamemsg = "While verifying %s: %s" % (filename, msg)
  else:
    fnamemsg = None

  if errcode is None:
    return (None, fnamemsg)
  elif errcode == utils.CERT_WARNING:
    return (LUClusterVerify.ETYPE_WARNING, fnamemsg)
  elif errcode == utils.CERT_ERROR:
    return (LUClusterVerify.ETYPE_ERROR, fnamemsg)

  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)


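# Illustrative sketch only, not part of the original module: for a
# certificate nearing expiry, utils.VerifyX509Certificate returns
# (utils.CERT_WARNING, msg), which _VerifyCertificate maps to
# (LUClusterVerify.ETYPE_WARNING, "While verifying <filename>: <msg>");
# a healthy certificate yields (None, None).
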
class LUClusterVerify(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_BGL = False

  TCLUSTER = "cluster"
  TNODE = "node"
  TINSTANCE = "instance"

  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
  EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
  EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
  ENODEDRBD = (TNODE, "ENODEDRBD")
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
  ENODEHV = (TNODE, "ENODEHV")
  ENODELVM = (TNODE, "ENODELVM")
  ENODEN1 = (TNODE, "ENODEN1")
  ENODENET = (TNODE, "ENODENET")
  ENODEOS = (TNODE, "ENODEOS")
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
  ENODERPC = (TNODE, "ENODERPC")
  ENODESSH = (TNODE, "ENODESSH")
  ENODEVERSION = (TNODE, "ENODEVERSION")
  ENODESETUP = (TNODE, "ENODESETUP")
  ENODETIME = (TNODE, "ENODETIME")
  ENODEOOBPATH = (TNODE, "ENODEOOBPATH")

  ETYPE_FIELD = "code"
  ETYPE_ERROR = "ERROR"
  ETYPE_WARNING = "WARNING"

  _HOOKS_INDENT_RE = re.compile("^", re.M)

  class NodeImage(object):
    """A class representing the logical and physical status of a node.

    @type name: string
    @ivar name: the node name to which this object refers
    @ivar volumes: a structure as returned from
        L{ganeti.backend.GetVolumeList} (runtime)
    @ivar instances: a list of running instances (runtime)
    @ivar pinst: list of configured primary instances (config)
    @ivar sinst: list of configured secondary instances (config)
    @ivar sbp: dictionary of {secondary-node: list of instances} of all peers
        of this node (config)
    @ivar mfree: free memory, as reported by hypervisor (runtime)
    @ivar dfree: free disk, as reported by the node (runtime)
    @ivar offline: the offline status (config)
    @type rpc_fail: boolean
    @ivar rpc_fail: whether the RPC verify call failed (overall,
        not whether the individual keys were correct) (runtime)
    @type lvm_fail: boolean
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
    @type hyp_fail: boolean
    @ivar hyp_fail: whether the RPC call didn't return the instance list
    @type ghost: boolean
    @ivar ghost: whether this is a known node or not (config)
    @type os_fail: boolean
    @ivar os_fail: whether the RPC call didn't return valid OS data
    @type oslist: list
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
    @type vm_capable: boolean
    @ivar vm_capable: whether the node can host instances

    """
    def __init__(self, offline=False, name=None, vm_capable=True):
      self.name = name
      self.volumes = {}
      self.instances = []
      self.pinst = []
      self.sinst = []
      self.sbp = {}
      self.mfree = 0
      self.dfree = 0
      self.offline = offline
      self.vm_capable = vm_capable
      self.rpc_fail = False
      self.lvm_fail = False
      self.hyp_fail = False
      self.ghost = False
      self.os_fail = False
      self.oslist = {}

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def _Error(self, ecode, item, msg, *args, **kwargs):
    """Format an error message.

    Based on the opcode's error_codes parameter, either format a
    parseable error code, or a simpler error string.

    This must be called only from Exec and functions called from Exec.

    """
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
    itype, etxt = ecode
    # first complete the msg
    if args:
      msg = msg % args
    # then format the whole message
    if self.op.error_codes:
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
    else:
      if item:
        item = " " + item
      else:
        item = ""
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
    # and finally report it via the feedback_fn
    self._feedback_fn("  - %s" % msg)

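  # Illustrative sketch only, not part of the original module: for an
  # ENODENET error on node "node1" with message "cannot reach the master IP",
  # _Error above emits "  - ERROR:ENODENET:node:node1:cannot reach the
  # master IP" when self.op.error_codes is set, and
  # "  - ERROR: node node1: cannot reach the master IP" otherwise.
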
  def _ErrorIf(self, cond, *args, **kwargs):
    """Log an error message if the passed condition is True.

    """
    cond = bool(cond) or self.op.debug_simulate_errors
    if cond:
      self._Error(*args, **kwargs)
    # do not mark the operation as failed for WARN cases only
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
      self.bad = self.bad or cond

  def _VerifyNode(self, ninfo, nresult):
    """Perform some basic validation on data returned from a node.

      - check the result data structure is well formed and has all the
        mandatory fields
      - check ganeti version

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the results from the node
    @rtype: boolean
    @return: whether overall this call was successful (and we can expect
         reasonable values in the response)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # main result, nresult should be a non-empty dict
    test = not nresult or not isinstance(nresult, dict)
    _ErrorIf(test, self.ENODERPC, node,
                  "unable to verify node: no data returned")
    if test:
      return False

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = nresult.get("version", None)
    test = not (remote_version and
                isinstance(remote_version, (list, tuple)) and
                len(remote_version) == 2)
    _ErrorIf(test, self.ENODERPC, node,
             "connection to node returned invalid data")
    if test:
      return False

    test = local_version != remote_version[0]
    _ErrorIf(test, self.ENODEVERSION, node,
             "incompatible protocol versions: master %s,"
             " node %s", local_version, remote_version[0])
    if test:
      return False

    # node seems compatible, we can actually try to look into its results

    # full package version
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
                  self.ENODEVERSION, node,
                  "software version mismatch: master %s, node %s",
                  constants.RELEASE_VERSION, remote_version[1],
                  code=self.ETYPE_WARNING)

    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
    if ninfo.vm_capable and isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        test = hv_result is not None
        _ErrorIf(test, self.ENODEHV, node,
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)

    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
    if ninfo.vm_capable and isinstance(hvp_result, list):
      for item, hv_name, hv_result in hvp_result:
        _ErrorIf(True, self.ENODEHV, node,
                 "hypervisor %s parameter verify failure (source %s): %s",
                 hv_name, item, hv_result)

    test = nresult.get(constants.NV_NODESETUP,
                           ["Missing NODESETUP results"])
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
             "; ".join(test))

    return True

  def _VerifyNodeTime(self, ninfo, nresult,
                      nvinfo_starttime, nvinfo_endtime):
    """Check the node time.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nvinfo_starttime: the start time of the RPC call
    @param nvinfo_endtime: the end time of the RPC call

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    ntime = nresult.get(constants.NV_TIME, None)
    try:
      ntime_merged = utils.MergeTime(ntime)
    except (ValueError, TypeError):
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
      return

    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
    else:
      ntime_diff = None

    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
             "Node time diverges by at least %s from master node time",
             ntime_diff)

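  # Illustrative sketch only, not part of the original module: assuming
  # constants.NODE_MAX_CLOCK_SKEW == 150, a node whose merged time lies 200s
  # before nvinfo_starttime yields ntime_diff == "200.0s" and therefore an
  # ENODETIME error ("Node time diverges by at least 200.0s ...").
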
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
    """Check the node LVM data.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param vg_name: the configured VG name

    """
    if vg_name is None:
      return

    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # checks vg existence and size > 20G
    vglist = nresult.get(constants.NV_VGLIST, None)
    test = not vglist
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
    if not test:
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                            constants.MIN_VG_SIZE)
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)

    # check pv names
    pvlist = nresult.get(constants.NV_PVLIST, None)
    test = pvlist is None
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
    if not test:
      # check that ':' is not present in PV names, since it's a
      # special character for lvcreate (denotes the range of PEs to
      # use on the PV)
      for _, pvname, owner_vg in pvlist:
        test = ":" in pvname
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
                 " '%s' of VG '%s'", pvname, owner_vg)

  def _VerifyNodeNetwork(self, ninfo, nresult):
    """Check the node network connectivity.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    test = constants.NV_NODELIST not in nresult
    _ErrorIf(test, self.ENODESSH, node,
             "node hasn't returned node ssh connectivity data")
    if not test:
      if nresult[constants.NV_NODELIST]:
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
          _ErrorIf(True, self.ENODESSH, node,
                   "ssh communication with node '%s': %s", a_node, a_msg)

    test = constants.NV_NODENETTEST not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node tcp connectivity data")
    if not test:
      if nresult[constants.NV_NODENETTEST]:
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
        for anode in nlist:
          _ErrorIf(True, self.ENODENET, node,
                   "tcp communication with node '%s': %s",
                   anode, nresult[constants.NV_NODENETTEST][anode])

    test = constants.NV_MASTERIP not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node master IP reachability data")
    if not test:
      if not nresult[constants.NV_MASTERIP]:
        if node == self.master_node:
          msg = "the master node cannot reach the master IP (not configured?)"
        else:
          msg = "cannot reach the master IP"
        _ErrorIf(True, self.ENODENET, node, msg)

  def _VerifyInstance(self, instance, instanceconfig, node_image,
1526
                      diskstatus):
1527
    """Verify an instance.
1528

1529
    This function checks to see if the required block devices are
1530
    available on the instance's node.
1531

1532
    """
1533
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1534
    node_current = instanceconfig.primary_node
1535

    
1536
    node_vol_should = {}
1537
    instanceconfig.MapLVsByNode(node_vol_should)
1538

    
1539
    for node in node_vol_should:
1540
      n_img = node_image[node]
1541
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1542
        # ignore missing volumes on offline or broken nodes
1543
        continue
1544
      for volume in node_vol_should[node]:
1545
        test = volume not in n_img.volumes
1546
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1547
                 "volume %s missing on node %s", volume, node)
1548

    
1549
    if instanceconfig.admin_up:
1550
      pri_img = node_image[node_current]
1551
      test = instance not in pri_img.instances and not pri_img.offline
1552
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1553
               "instance not running on its primary node %s",
1554
               node_current)
1555

    
1556
    for node, n_img in node_image.items():
1557
      if node != node_current:
1558
        test = instance in n_img.instances
1559
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1560
                 "instance should not run on node %s", node)
1561

    
1562
    diskdata = [(nname, success, status, idx)
1563
                for (nname, disks) in diskstatus.items()
1564
                for idx, (success, status) in enumerate(disks)]
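    # Editor's note (illustrative, not original code): diskdata flattens the
    # per-node status into rows such as
    #   [("node1", True, <bdev status>, 0), ("node2", False, "node offline", 0)]
    # i.e. one (node, success, status, disk index) tuple per disk and node.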
1565

    
1566
    for nname, success, bdev_status, idx in diskdata:
1567
      # the 'ghost node' construction in Exec() ensures that we have a
1568
      # node here
1569
      snode = node_image[nname]
1570
      bad_snode = snode.ghost or snode.offline
1571
      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
1572
               self.EINSTANCEFAULTYDISK, instance,
1573
               "couldn't retrieve status for disk/%s on %s: %s",
1574
               idx, nname, bdev_status)
1575
      _ErrorIf((instanceconfig.admin_up and success and
1576
                bdev_status.ldisk_status == constants.LDS_FAULTY),
1577
               self.EINSTANCEFAULTYDISK, instance,
1578
               "disk/%s on %s is faulty", idx, nname)
1579

    
1580
  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
1581
    """Verify if there are any unknown volumes in the cluster.
1582

1583
    The .os, .swap and backup volumes are ignored. All other volumes are
1584
    reported as unknown.
1585

1586
    @type reserved: L{ganeti.utils.FieldSet}
1587
    @param reserved: a FieldSet of reserved volume names
1588

1589
    """
1590
    for node, n_img in node_image.items():
1591
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1592
        # skip non-healthy nodes
1593
        continue
1594
      for volume in n_img.volumes:
1595
        test = ((node not in node_vol_should or
1596
                volume not in node_vol_should[node]) and
1597
                not reserved.Matches(volume))
1598
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1599
                      "volume %s is unknown", volume)
1600

    
1601
  def _VerifyOrphanInstances(self, instancelist, node_image):
1602
    """Verify the list of running instances.
1603

1604
    This checks what instances are running but unknown to the cluster.
1605

1606
    """
1607
    for node, n_img in node_image.items():
1608
      for o_inst in n_img.instances:
1609
        test = o_inst not in instancelist
1610
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1611
                      "instance %s on node %s should not exist", o_inst, node)
1612

    
1613
  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1614
    """Verify N+1 Memory Resilience.
1615

1616
    Check that if one single node dies we can still start all the
1617
    instances it was primary for.
1618

1619
    """
1620
    for node, n_img in node_image.items():
1621
      # This code checks that every node which is now listed as
1622
      # secondary has enough memory to host all instances it is
1623
      # supposed to should a single other node in the cluster fail.
1624
      # FIXME: not ready for failover to an arbitrary node
1625
      # FIXME: does not support file-backed instances
1626
      # WARNING: we currently take into account down instances as well
1627
      # as up ones, considering that even if they're down someone
1628
      # might want to start them even in the event of a node failure.
1629
      if n_img.offline:
1630
        # we're skipping offline nodes from the N+1 warning, since
1631
        # most likely we don't have good memory information from them;
1632
        # we already list instances living on such nodes, and that's
1633
        # enough warning
1634
        continue
1635
      for prinode, instances in n_img.sbp.items():
1636
        needed_mem = 0
1637
        for instance in instances:
1638
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1639
          if bep[constants.BE_AUTO_BALANCE]:
1640
            needed_mem += bep[constants.BE_MEMORY]
1641
        test = n_img.mfree < needed_mem
1642
        self._ErrorIf(test, self.ENODEN1, node,
1643
                      "not enough memory to accomodate instance failovers"
1644
                      " should node %s fail (%dMiB needed, %dMiB available)",
1645
                      prinode, needed_mem, n_img.mfree)
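        # Editor's note (illustrative, not original code): n_img.sbp maps each
        # primary node name to the instances for which this node is secondary,
        # e.g. {"node1": ["inst1", "inst2"]}; needed_mem is thus the memory
        # this node would have to absorb should that one primary node fail.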
1646

    
1647
  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
1648
                       master_files):
1649
    """Verifies and computes the node required file checksums.
1650

1651
    @type ninfo: L{objects.Node}
1652
    @param ninfo: the node to check
1653
    @param nresult: the remote results for the node
1654
    @param file_list: required list of files
1655
    @param local_cksum: dictionary of local files and their checksums
1656
    @param master_files: list of files that only masters should have
1657

1658
    """
1659
    node = ninfo.name
1660
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1661

    
1662
    remote_cksum = nresult.get(constants.NV_FILELIST, None)
1663
    test = not isinstance(remote_cksum, dict)
1664
    _ErrorIf(test, self.ENODEFILECHECK, node,
1665
             "node hasn't returned file checksum data")
1666
    if test:
1667
      return
1668

    
1669
    for file_name in file_list:
1670
      node_is_mc = ninfo.master_candidate
1671
      must_have = (file_name not in master_files) or node_is_mc
1672
      # missing
1673
      test1 = file_name not in remote_cksum
1674
      # invalid checksum
1675
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1676
      # existing and good
1677
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1678
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1679
               "file '%s' missing", file_name)
1680
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1681
               "file '%s' has wrong checksum", file_name)
1682
      # not candidate and this is not a must-have file
1683
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1684
               "file '%s' should not exist on non master"
1685
               " candidates (and the file is outdated)", file_name)
1686
      # all good, except non-master/non-must have combination
1687
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1688
               "file '%s' should not exist"
1689
               " on non master candidates", file_name)
1690

    
1691
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
1692
                      drbd_map):
1693
    """Verifies and the node DRBD status.
1694

1695
    @type ninfo: L{objects.Node}
1696
    @param ninfo: the node to check
1697
    @param nresult: the remote results for the node
1698
    @param instanceinfo: the dict of instances
1699
    @param drbd_helper: the configured DRBD usermode helper
1700
    @param drbd_map: the DRBD map as returned by
1701
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1702

1703
    """
1704
    node = ninfo.name
1705
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1706

    
1707
    if drbd_helper:
1708
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
1709
      test = (helper_result is None)
1710
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
1711
               "no drbd usermode helper returned")
1712
      if helper_result:
1713
        status, payload = helper_result
1714
        test = not status
1715
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
1716
                 "drbd usermode helper check unsuccessful: %s", payload)
1717
        test = status and (payload != drbd_helper)
1718
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
1719
                 "wrong drbd usermode helper: %s", payload)
1720

    
1721
    # compute the DRBD minors
1722
    node_drbd = {}
1723
    for minor, instance in drbd_map[node].items():
1724
      test = instance not in instanceinfo
1725
      _ErrorIf(test, self.ECLUSTERCFG, None,
1726
               "ghost instance '%s' in temporary DRBD map", instance)
1727
      # ghost instance should not be running, but otherwise we
      # don't give double warnings (both ghost instance and
      # unallocated minor in use)
1730
      if test:
1731
        node_drbd[minor] = (instance, False)
1732
      else:
1733
        instance = instanceinfo[instance]
1734
        node_drbd[minor] = (instance.name, instance.admin_up)
1735

    
1736
    # and now check them
1737
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
1738
    test = not isinstance(used_minors, (tuple, list))
1739
    _ErrorIf(test, self.ENODEDRBD, node,
1740
             "cannot parse drbd status file: %s", str(used_minors))
1741
    if test:
1742
      # we cannot check drbd status
1743
      return
1744

    
1745
    for minor, (iname, must_exist) in node_drbd.items():
1746
      test = minor not in used_minors and must_exist
1747
      _ErrorIf(test, self.ENODEDRBD, node,
1748
               "drbd minor %d of instance %s is not active", minor, iname)
1749
    for minor in used_minors:
1750
      test = minor not in node_drbd
1751
      _ErrorIf(test, self.ENODEDRBD, node,
1752
               "unallocated drbd minor %d is in use", minor)
1753

    
1754
  def _UpdateNodeOS(self, ninfo, nresult, nimg):
1755
    """Builds the node OS structures.
1756

1757
    @type ninfo: L{objects.Node}
1758
    @param ninfo: the node to check
1759
    @param nresult: the remote results for the node
1760
    @param nimg: the node image object
1761

1762
    """
1763
    node = ninfo.name
1764
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1765

    
1766
    remote_os = nresult.get(constants.NV_OSLIST, None)
1767
    test = (not isinstance(remote_os, list) or
1768
            not compat.all(isinstance(v, list) and len(v) == 7
1769
                           for v in remote_os))
1770

    
1771
    _ErrorIf(test, self.ENODEOS, node,
1772
             "node hasn't returned valid OS data")
1773

    
1774
    nimg.os_fail = test
1775

    
1776
    if test:
1777
      return
1778

    
1779
    os_dict = {}
1780

    
1781
    for (name, os_path, status, diagnose,
1782
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
1783

    
1784
      if name not in os_dict:
1785
        os_dict[name] = []
1786

    
1787
      # parameters is a list of lists instead of list of tuples due to
1788
      # JSON lacking a real tuple type, fix it:
1789
      parameters = [tuple(v) for v in parameters]
1790
      os_dict[name].append((os_path, status, diagnose,
1791
                            set(variants), set(parameters), set(api_ver)))
1792

    
1793
    nimg.oslist = os_dict
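    # Editor's note (illustrative, not original code): nimg.oslist maps each
    # OS name to a list of (path, status, diagnose, variants, parameters,
    # api_versions) tuples, one per directory the OS was found in; a healthy
    # node normally has exactly one entry per OS name.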
1794

    
1795
  def _VerifyNodeOS(self, ninfo, nimg, base):
1796
    """Verifies the node OS list.
1797

1798
    @type ninfo: L{objects.Node}
1799
    @param ninfo: the node to check
1800
    @param nimg: the node image object
1801
    @param base: the 'template' node we match against (e.g. from the master)
1802

1803
    """
1804
    node = ninfo.name
1805
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1806

    
1807
    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
1808

    
1809
    for os_name, os_data in nimg.oslist.items():
1810
      assert os_data, "Empty OS status for OS %s?!" % os_name
1811
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
1812
      _ErrorIf(not f_status, self.ENODEOS, node,
1813
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
1814
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
1815
               "OS '%s' has multiple entries (first one shadows the rest): %s",
1816
               os_name, utils.CommaJoin([v[0] for v in os_data]))
1817
      # this will also be caught in the backend
1818
      _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
1819
               and not f_var, self.ENODEOS, node,
1820
               "OS %s with API at least %d does not declare any variant",
1821
               os_name, constants.OS_API_V15)
1822
      # comparisons with the 'base' image
1823
      test = os_name not in base.oslist
1824
      _ErrorIf(test, self.ENODEOS, node,
1825
               "Extra OS %s not present on reference node (%s)",
1826
               os_name, base.name)
1827
      if test:
1828
        continue
1829
      assert base.oslist[os_name], "Base node has empty OS status?"
1830
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
1831
      if not b_status:
1832
        # base OS is invalid, skipping
1833
        continue
1834
      for kind, a, b in [("API version", f_api, b_api),
1835
                         ("variants list", f_var, b_var),
1836
                         ("parameters", f_param, b_param)]:
1837
        _ErrorIf(a != b, self.ENODEOS, node,
1838
                 "OS %s %s differs from reference node %s: %s vs. %s",
1839
                 kind, os_name, base.name,
1840
                 utils.CommaJoin(a), utils.CommaJoin(b))
1841

    
1842
    # check any missing OSes
1843
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
1844
    _ErrorIf(missing, self.ENODEOS, node,
1845
             "OSes present on reference node %s but missing on this node: %s",
1846
             base.name, utils.CommaJoin(missing))
1847

    
1848
  def _VerifyOob(self, ninfo, nresult):
1849
    """Verifies out of band functionality of a node.
1850

1851
    @type ninfo: L{objects.Node}
1852
    @param ninfo: the node to check
1853
    @param nresult: the remote results for the node
1854

1855
    """
1856
    node = ninfo.name
1857
    # We just have to verify the paths on master and/or master candidates
1858
    # as the oob helper is invoked on the master
1859
    if ((ninfo.master_candidate or ninfo.master_capable) and
1860
        constants.NV_OOB_PATHS in nresult):
1861
      for path_result in nresult[constants.NV_OOB_PATHS]:
1862
        self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)
1863

    
1864
  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1865
    """Verifies and updates the node volume data.
1866

1867
    This function will update a L{NodeImage}'s internal structures
1868
    with data from the remote call.
1869

1870
    @type ninfo: L{objects.Node}
1871
    @param ninfo: the node to check
1872
    @param nresult: the remote results for the node
1873
    @param nimg: the node image object
1874
    @param vg_name: the configured VG name
1875

1876
    """
1877
    node = ninfo.name
1878
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1879

    
1880
    nimg.lvm_fail = True
1881
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1882
    if vg_name is None:
1883
      pass
1884
    elif isinstance(lvdata, basestring):
1885
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1886
               utils.SafeEncode(lvdata))
1887
    elif not isinstance(lvdata, dict):
1888
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1889
    else:
1890
      nimg.volumes = lvdata
1891
      nimg.lvm_fail = False
1892

    
1893
  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1894
    """Verifies and updates the node instance list.
1895

1896
    If the listing was successful, then updates this node's instance
1897
    list. Otherwise, it marks the RPC call as failed for the instance
1898
    list key.
1899

1900
    @type ninfo: L{objects.Node}
1901
    @param ninfo: the node to check
1902
    @param nresult: the remote results for the node
1903
    @param nimg: the node image object
1904

1905
    """
1906
    idata = nresult.get(constants.NV_INSTANCELIST, None)
1907
    test = not isinstance(idata, list)
1908
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1909
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
1910
    if test:
1911
      nimg.hyp_fail = True
1912
    else:
1913
      nimg.instances = idata
1914

    
1915
  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1916
    """Verifies and computes a node information map
1917

1918
    @type ninfo: L{objects.Node}
1919
    @param ninfo: the node to check
1920
    @param nresult: the remote results for the node
1921
    @param nimg: the node image object
1922
    @param vg_name: the configured VG name
1923

1924
    """
1925
    node = ninfo.name
1926
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1927

    
1928
    # try to read free memory (from the hypervisor)
1929
    hv_info = nresult.get(constants.NV_HVINFO, None)
1930
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1931
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1932
    if not test:
1933
      try:
1934
        nimg.mfree = int(hv_info["memory_free"])
1935
      except (ValueError, TypeError):
1936
        _ErrorIf(True, self.ENODERPC, node,
1937
                 "node returned invalid nodeinfo, check hypervisor")
1938

    
1939
    # FIXME: devise a free space model for file based instances as well
1940
    if vg_name is not None:
1941
      test = (constants.NV_VGLIST not in nresult or
1942
              vg_name not in nresult[constants.NV_VGLIST])
1943
      _ErrorIf(test, self.ENODELVM, node,
1944
               "node didn't return data for the volume group '%s'"
1945
               " - it is either missing or broken", vg_name)
1946
      if not test:
1947
        try:
1948
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1949
        except (ValueError, TypeError):
1950
          _ErrorIf(True, self.ENODERPC, node,
1951
                   "node returned invalid LVM info, check LVM status")
1952

    
1953
  def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
1954
    """Gets per-disk status information for all instances.
1955

1956
    @type nodelist: list of strings
1957
    @param nodelist: Node names
1958
    @type node_image: dict of (name, L{objects.Node})
1959
    @param node_image: Node objects
1960
    @type instanceinfo: dict of (name, L{objects.Instance})
1961
    @param instanceinfo: Instance objects
1962
    @rtype: {instance: {node: [(success, payload)]}}
1963
    @return: a dictionary of per-instance dictionaries with nodes as
1964
        keys and disk information as values; the disk information is a
1965
        list of tuples (success, payload)
1966

1967
    """
1968
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1969

    
1970
    node_disks = {}
1971
    node_disks_devonly = {}
1972
    diskless_instances = set()
1973
    diskless = constants.DT_DISKLESS
1974

    
1975
    for nname in nodelist:
1976
      node_instances = list(itertools.chain(node_image[nname].pinst,
1977
                                            node_image[nname].sinst))
1978
      diskless_instances.update(inst for inst in node_instances
1979
                                if instanceinfo[inst].disk_template == diskless)
1980
      disks = [(inst, disk)
1981
               for inst in node_instances
1982
               for disk in instanceinfo[inst].disks]
1983

    
1984
      if not disks:
1985
        # No need to collect data
1986
        continue
1987

    
1988
      node_disks[nname] = disks
1989

    
1990
      # Creating copies as SetDiskID below will modify the objects and that can
1991
      # lead to incorrect data returned from nodes
1992
      devonly = [dev.Copy() for (_, dev) in disks]
1993

    
1994
      for dev in devonly:
1995
        self.cfg.SetDiskID(dev, nname)
1996

    
1997
      node_disks_devonly[nname] = devonly
1998

    
1999
    assert len(node_disks) == len(node_disks_devonly)
2000

    
2001
    # Collect data from all nodes with disks
2002
    result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2003
                                                          node_disks_devonly)
2004

    
2005
    assert len(result) == len(node_disks)
2006

    
2007
    instdisk = {}
2008

    
2009
    for (nname, nres) in result.items():
2010
      disks = node_disks[nname]
2011

    
2012
      if nres.offline:
2013
        # No data from this node
2014
        data = len(disks) * [(False, "node offline")]
2015
      else:
2016
        msg = nres.fail_msg
2017
        _ErrorIf(msg, self.ENODERPC, nname,
2018
                 "while getting disk information: %s", msg)
2019
        if msg:
2020
          # No data from this node
2021
          data = len(disks) * [(False, msg)]
2022
        else:
2023
          data = []
2024
          for idx, i in enumerate(nres.payload):
2025
            if isinstance(i, (tuple, list)) and len(i) == 2:
2026
              data.append(i)
2027
            else:
2028
              logging.warning("Invalid result from node %s, entry %d: %s",
2029
                              nname, idx, i)
2030
              data.append((False, "Invalid result from the remote node"))
2031

    
2032
      for ((inst, _), status) in zip(disks, data):
2033
        instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2034

    
2035
    # Add empty entries for diskless instances.
2036
    for inst in diskless_instances:
2037
      assert inst not in instdisk
2038
      instdisk[inst] = {}
2039

    
2040
    assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2041
                      len(nnames) <= len(instanceinfo[inst].all_nodes) and
2042
                      compat.all(isinstance(s, (tuple, list)) and
2043
                                 len(s) == 2 for s in statuses)
2044
                      for inst, nnames in instdisk.items()
2045
                      for nname, statuses in nnames.items())
2046
    assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
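    # Editor's note (illustrative, not original code): the returned structure
    # looks like {"inst1": {"node1": [(True, status0), (True, status1)],
    #                       "node2": [(False, "node offline")]},
    #             "diskless-inst": {}}.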
2047

    
2048
    return instdisk
2049

    
2050
  def _VerifyHVP(self, hvp_data):
2051
    """Verifies locally the syntax of the hypervisor parameters.
2052

2053
    """
2054
    for item, hv_name, hv_params in hvp_data:
2055
      msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
2056
             (item, hv_name))
2057
      try:
2058
        hv_class = hypervisor.GetHypervisor(hv_name)
2059
        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2060
        hv_class.CheckParameterSyntax(hv_params)
2061
      except errors.GenericError, err:
2062
        self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))
2063

    
2064

    
2065
  def BuildHooksEnv(self):
2066
    """Build hooks env.
2067

2068
    Cluster-Verify hooks just run in the post phase and their failure causes
    the output to be logged in the verify output and the verification to fail.
2070

2071
    """
2072
    all_nodes = self.cfg.GetNodeList()
2073
    env = {
2074
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
2075
      }
2076
    for node in self.cfg.GetAllNodesInfo().values():
2077
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
2078

    
2079
    return env, [], all_nodes
2080

    
2081
  def Exec(self, feedback_fn):
2082
    """Verify integrity of cluster, performing various test on nodes.
2083

2084
    """
2085
    # This method has too many local variables. pylint: disable-msg=R0914
2086
    self.bad = False
2087
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2088
    verbose = self.op.verbose
2089
    self._feedback_fn = feedback_fn
2090
    feedback_fn("* Verifying global settings")
2091
    for msg in self.cfg.VerifyConfig():
2092
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
2093

    
2094
    # Check the cluster certificates
2095
    for cert_filename in constants.ALL_CERT_FILES:
2096
      (errcode, msg) = _VerifyCertificate(cert_filename)
2097
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
2098

    
2099
    vg_name = self.cfg.GetVGName()
2100
    drbd_helper = self.cfg.GetDRBDHelper()
2101
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2102
    cluster = self.cfg.GetClusterInfo()
2103
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
2104
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
2105
    nodeinfo_byname = dict(zip(nodelist, nodeinfo))
2106
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
2107
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
2108
                        for iname in instancelist)
2109
    groupinfo = self.cfg.GetAllNodeGroupsInfo()
2110
    i_non_redundant = [] # Non redundant instances
2111
    i_non_a_balanced = [] # Non auto-balanced instances
2112
    n_offline = 0 # Count of offline nodes
2113
    n_drained = 0 # Count of nodes being drained
2114
    node_vol_should = {}
2115

    
2116
    # FIXME: verify OS list
2117
    # do local checksums
2118
    master_files = [constants.CLUSTER_CONF_FILE]
2119
    master_node = self.master_node = self.cfg.GetMasterNode()
2120
    master_ip = self.cfg.GetMasterIP()
2121

    
2122
    file_names = ssconf.SimpleStore().GetFileList()
2123
    file_names.extend(constants.ALL_CERT_FILES)
2124
    file_names.extend(master_files)
2125
    if cluster.modify_etc_hosts:
2126
      file_names.append(constants.ETC_HOSTS)
2127

    
2128
    local_checksums = utils.FingerprintFiles(file_names)
2129

    
2130
    # Compute the set of hypervisor parameters
2131
    hvp_data = []
2132
    for hv_name in hypervisors:
2133
      hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
2134
    for os_name, os_hvp in cluster.os_hvp.items():
2135
      for hv_name, hv_params in os_hvp.items():
2136
        if not hv_params:
2137
          continue
2138
        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
2139
        hvp_data.append(("os %s" % os_name, hv_name, full_params))
2140
    # TODO: collapse identical parameter values in a single one
2141
    for instance in instanceinfo.values():
2142
      if not instance.hvparams:
2143
        continue
2144
      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
2145
                       cluster.FillHV(instance)))
2146
    # and verify them locally
2147
    self._VerifyHVP(hvp_data)
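    # Editor's note (illustrative, not original code): each hvp_data entry is
    # a (source, hypervisor, parameters) triple, e.g. ("cluster", "kvm", {...})
    # or ("instance inst1.example.com", "kvm", {...}), so that syntax errors
    # can be reported together with their origin.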
2148

    
2149
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
2150
    node_verify_param = {
2151
      constants.NV_FILELIST: file_names,
2152
      constants.NV_NODELIST: [node.name for node in nodeinfo
2153
                              if not node.offline],
2154
      constants.NV_HYPERVISOR: hypervisors,
2155
      constants.NV_HVPARAMS: hvp_data,
2156
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
2157
                                  node.secondary_ip) for node in nodeinfo
2158
                                 if not node.offline],
2159
      constants.NV_INSTANCELIST: hypervisors,
2160
      constants.NV_VERSION: None,
2161
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2162
      constants.NV_NODESETUP: None,
2163
      constants.NV_TIME: None,
2164
      constants.NV_MASTERIP: (master_node, master_ip),
2165
      constants.NV_OSLIST: None,
2166
      constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
2167
      }
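    # Editor's note (not original code): node_verify_param is the request sent
    # to every node via the node_verify RPC below; each NV_* key selects one
    # class of checks and the per-node reply is keyed the same way.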
2168

    
2169
    if vg_name is not None:
2170
      node_verify_param[constants.NV_VGLIST] = None
2171
      node_verify_param[constants.NV_LVLIST] = vg_name
2172
      node_verify_param[constants.NV_PVLIST] = [vg_name]
2173
      node_verify_param[constants.NV_DRBDLIST] = None
2174

    
2175
    if drbd_helper:
2176
      node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
2177

    
2178
    # Build our expected cluster state
2179
    node_image = dict((node.name, self.NodeImage(offline=node.offline,
2180
                                                 name=node.name,
2181
                                                 vm_capable=node.vm_capable))
2182
                      for node in nodeinfo)
2183

    
2184
    # Gather OOB paths
2185
    oob_paths = []
2186
    for node in nodeinfo:
2187
      path = _SupportsOob(self.cfg, node)
2188
      if path and path not in oob_paths:
2189
        oob_paths.append(path)
2190

    
2191
    if oob_paths:
2192
      node_verify_param[constants.NV_OOB_PATHS] = oob_paths
2193

    
2194
    for instance in instancelist:
2195
      inst_config = instanceinfo[instance]
2196

    
2197
      for nname in inst_config.all_nodes:
2198
        if nname not in node_image:
2199
          # ghost node
2200
          gnode = self.NodeImage(name=nname)
2201
          gnode.ghost = True
2202
          node_image[nname] = gnode
2203

    
2204
      inst_config.MapLVsByNode(node_vol_should)
2205

    
2206
      pnode = inst_config.primary_node
2207
      node_image[pnode].pinst.append(instance)
2208

    
2209
      for snode in inst_config.secondary_nodes:
2210
        nimg = node_image[snode]
2211
        nimg.sinst.append(instance)
2212
        if pnode not in nimg.sbp:
2213
          nimg.sbp[pnode] = []
2214
        nimg.sbp[pnode].append(instance)
2215

    
2216
    # At this point, we have the in-memory data structures complete,
2217
    # except for the runtime information, which we'll gather next
2218

    
2219
    # Due to the way our RPC system works, exact response times cannot be
2220
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2221
    # time before and after executing the request, we can at least have a time
2222
    # window.
2223
    nvinfo_starttime = time.time()
2224
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
2225
                                           self.cfg.GetClusterName())
2226
    nvinfo_endtime = time.time()
2227

    
2228
    all_drbd_map = self.cfg.ComputeDRBDMap()
2229

    
2230
    feedback_fn("* Gathering disk information (%s nodes)" % len(nodelist))
2231
    instdisk = self._CollectDiskInfo(nodelist, node_image, instanceinfo)
2232

    
2233
    feedback_fn("* Verifying node status")
2234

    
2235
    refos_img = None
2236

    
2237
    for node_i in nodeinfo:
2238
      node = node_i.name
2239
      nimg = node_image[node]
2240

    
2241
      if node_i.offline:
2242
        if verbose:
2243
          feedback_fn("* Skipping offline node %s" % (node,))
2244
        n_offline += 1
2245
        continue
2246

    
2247
      if node == master_node:
2248
        ntype = "master"
2249
      elif node_i.master_candidate:
2250
        ntype = "master candidate"
2251
      elif node_i.drained:
2252
        ntype = "drained"
2253
        n_drained += 1
2254
      else:
2255
        ntype = "regular"
2256
      if verbose:
2257
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2258

    
2259
      msg = all_nvinfo[node].fail_msg
2260
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2261
      if msg:
2262
        nimg.rpc_fail = True
2263
        continue
2264

    
2265
      nresult = all_nvinfo[node].payload
2266

    
2267
      nimg.call_ok = self._VerifyNode(node_i, nresult)
2268
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2269
      self._VerifyNodeNetwork(node_i, nresult)
2270
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
2271
                            master_files)
2272

    
2273
      self._VerifyOob(node_i, nresult)
2274

    
2275
      if nimg.vm_capable:
2276
        self._VerifyNodeLVM(node_i, nresult, vg_name)
2277
        self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
2278
                             all_drbd_map)
2279

    
2280
        self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2281
        self._UpdateNodeInstances(node_i, nresult, nimg)
2282
        self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2283
        self._UpdateNodeOS(node_i, nresult, nimg)
2284
        if not nimg.os_fail:
2285
          if refos_img is None:
2286
            refos_img = nimg
2287
          self._VerifyNodeOS(node_i, nimg, refos_img)
2288

    
2289
    feedback_fn("* Verifying instance status")
2290
    for instance in instancelist:
2291
      if verbose:
2292
        feedback_fn("* Verifying instance %s" % instance)
2293
      inst_config = instanceinfo[instance]
2294
      self._VerifyInstance(instance, inst_config, node_image,
2295
                           instdisk[instance])
2296
      inst_nodes_offline = []
2297

    
2298
      pnode = inst_config.primary_node
2299
      pnode_img = node_image[pnode]
2300
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2301
               self.ENODERPC, pnode, "instance %s, connection to"
2302
               " primary node failed", instance)
2303

    
2304
      _ErrorIf(pnode_img.offline, self.EINSTANCEBADNODE, instance,
2305
               "instance lives on offline node %s", inst_config.primary_node)
2306

    
2307
      # If the instance is non-redundant we cannot survive losing its primary
2308
      # node, so we are not N+1 compliant. On the other hand we have no disk
2309
      # templates with more than one secondary so that situation is not well
2310
      # supported either.
2311
      # FIXME: does not support file-backed instances
2312
      if not inst_config.secondary_nodes:
2313
        i_non_redundant.append(instance)
2314

    
2315
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2316
               instance, "instance has multiple secondary nodes: %s",
2317
               utils.CommaJoin(inst_config.secondary_nodes),
2318
               code=self.ETYPE_WARNING)
2319

    
2320
      if inst_config.disk_template in constants.DTS_NET_MIRROR:
2321
        pnode = inst_config.primary_node
2322
        instance_nodes = utils.NiceSort(inst_config.all_nodes)
2323
        instance_groups = {}
2324

    
2325
        for node in instance_nodes:
2326
          instance_groups.setdefault(nodeinfo_byname[node].group,
2327
                                     []).append(node)
2328

    
2329
        pretty_list = [
2330
          "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
2331
          # Sort so that we always list the primary node first.
2332
          for group, nodes in sorted(instance_groups.items(),
2333
                                     key=lambda (_, nodes): pnode in nodes,
2334
                                     reverse=True)]
2335

    
2336
        self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
2337
                      instance, "instance has primary and secondary nodes in"
2338
                      " different groups: %s", utils.CommaJoin(pretty_list),
2339
                      code=self.ETYPE_WARNING)
2340

    
2341
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
2342
        i_non_a_balanced.append(instance)
2343

    
2344
      for snode in inst_config.secondary_nodes:
2345
        s_img = node_image[snode]
2346
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
2347
                 "instance %s, connection to secondary node failed", instance)
2348

    
2349
        if s_img.offline:
2350
          inst_nodes_offline.append(snode)
2351

    
2352
      # warn that the instance lives on offline nodes
2353
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
2354
               "instance has offline secondary node(s) %s",
2355
               utils.CommaJoin(inst_nodes_offline))
2356
      # ... or ghost/non-vm_capable nodes
2357
      for node in inst_config.all_nodes:
2358
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
2359
                 "instance lives on ghost node %s", node)
2360
        _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
2361
                 instance, "instance lives on non-vm_capable node %s", node)
2362

    
2363
    feedback_fn("* Verifying orphan volumes")
2364
    reserved = utils.FieldSet(*cluster.reserved_lvs)
2365
    self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2366

    
2367
    feedback_fn("* Verifying orphan instances")
2368
    self._VerifyOrphanInstances(instancelist, node_image)
2369

    
2370
    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2371
      feedback_fn("* Verifying N+1 Memory redundancy")
2372
      self._VerifyNPlusOneMemory(node_image, instanceinfo)
2373

    
2374
    feedback_fn("* Other Notes")
2375
    if i_non_redundant:
2376
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
2377
                  % len(i_non_redundant))
2378

    
2379
    if i_non_a_balanced:
2380
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
2381
                  % len(i_non_a_balanced))
2382

    
2383
    if n_offline:
2384
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
2385

    
2386
    if n_drained:
2387
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
2388

    
2389
    return not self.bad
2390

    
2391
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
2392
    """Analyze the post-hooks' result
2393

2394
    This method analyses the hook result, handles it, and sends some
2395
    nicely-formatted feedback back to the user.
2396

2397
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
2398
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
2399
    @param hooks_results: the results of the multi-node hooks rpc call
2400
    @param feedback_fn: function used to send feedback back to the caller
2401
    @param lu_result: previous Exec result
2402
    @return: the new Exec result, based on the previous result
2403
        and hook results
2404

2405
    """
2406
    # We only really run POST phase hooks, and are only interested in
2407
    # their results
2408
    if phase == constants.HOOKS_PHASE_POST:
2409
      # Used to change hooks' output to proper indentation
2410
      feedback_fn("* Hooks Results")
2411
      assert hooks_results, "invalid result from hooks"
2412

    
2413
      for node_name in hooks_results:
2414
        res = hooks_results[node_name]
2415
        msg = res.fail_msg
2416
        test = msg and not res.offline
2417
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
2418
                      "Communication failure in hooks execution: %s", msg)
2419
        if res.offline or msg:
2420
          # No need to investigate payload if node is offline or gave an error.
2421
          # override manually lu_result here as _ErrorIf only
2422
          # overrides self.bad
2423
          lu_result = 1
2424
          continue
2425
        for script, hkr, output in res.payload:
2426
          test = hkr == constants.HKR_FAIL
2427
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
2428
                        "Script %s failed, output:", script)
2429
          if test:
2430
            output = self._HOOKS_INDENT_RE.sub('      ', output)
2431
            feedback_fn("%s" % output)
2432
            lu_result = 0
2433

    
2434
      return lu_result
2435

    
2436

    
2437
class LUClusterVerifyDisks(NoHooksLU):
2438
  """Verifies the cluster disks status.
2439

2440
  """
2441
  REQ_BGL = False
2442

    
2443
  def ExpandNames(self):
2444
    self.needed_locks = {
2445
      locking.LEVEL_NODE: locking.ALL_SET,
2446
      locking.LEVEL_INSTANCE: locking.ALL_SET,
2447
    }
2448
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
2449

    
2450
  def Exec(self, feedback_fn):
2451
    """Verify integrity of cluster disks.
2452

2453
    @rtype: tuple of three items
2454
    @return: a tuple of (dict of node-to-node_error, list of instances
2455
        which need activate-disks, dict of instance: (node, volume) for
2456
        missing volumes)
2457

2458
    """
2459
    result = res_nodes, res_instances, res_missing = {}, [], {}
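    # Editor's note (not original code): the chained assignment above makes
    # result share the very same dict/list/dict objects as res_nodes,
    # res_instances and res_missing, so filling those below also fills the
    # returned tuple in place.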
2460

    
2461
    nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
2462
    instances = self.cfg.GetAllInstancesInfo().values()
2463

    
2464
    nv_dict = {}
2465
    for inst in instances:
2466
      inst_lvs = {}
2467
      if not inst.admin_up:
2468
        continue
2469
      inst.MapLVsByNode(inst_lvs)
2470
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
2471
      for node, vol_list in inst_lvs.iteritems():
2472
        for vol in vol_list:
2473
          nv_dict[(node, vol)] = inst
2474

    
2475
    if not nv_dict:
2476
      return result
2477

    
2478
    node_lvs = self.rpc.call_lv_list(nodes, [])
2479
    for node, node_res in node_lvs.items():
2480
      if node_res.offline:
2481
        continue
2482
      msg = node_res.fail_msg
2483
      if msg:
2484
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2485
        res_nodes[node] = msg
2486
        continue
2487

    
2488
      lvs = node_res.payload
2489
      for lv_name, (_, _, lv_online) in lvs.items():
2490
        inst = nv_dict.pop((node, lv_name), None)
2491
        if (not lv_online and inst is not None
2492
            and inst.name not in res_instances):
2493
          res_instances.append(inst.name)
2494

    
2495
    # any leftover items in nv_dict are missing LVs, let's arrange the
2496
    # data better
2497
    for key, inst in nv_dict.iteritems():
2498
      if inst.name not in res_missing:
2499
        res_missing[inst.name] = []
2500
      res_missing[inst.name].append(key)
2501

    
2502
    return result
2503

    
2504

    
2505
class LUClusterRepairDiskSizes(NoHooksLU):
2506
  """Verifies the cluster disks sizes.
2507

2508
  """
2509
  REQ_BGL = False
2510

    
2511
  def ExpandNames(self):
2512
    if self.op.instances:
2513
      self.wanted_names = []
2514
      for name in self.op.instances:
2515
        full_name = _ExpandInstanceName(self.cfg, name)
2516
        self.wanted_names.append(full_name)
2517
      self.needed_locks = {
2518
        locking.LEVEL_NODE: [],
2519
        locking.LEVEL_INSTANCE: self.wanted_names,
2520
        }
2521
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2522
    else:
2523
      self.wanted_names = None
2524
      self.needed_locks = {
2525
        locking.LEVEL_NODE: locking.ALL_SET,
2526
        locking.LEVEL_INSTANCE: locking.ALL_SET,
2527
        }
2528
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2529

    
2530
  def DeclareLocks(self, level):
2531
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
2532
      self._LockInstancesNodes(primary_only=True)
2533

    
2534
  def CheckPrereq(self):
2535
    """Check prerequisites.
2536

2537
    This only checks the optional instance list against the existing names.
2538

2539
    """
2540
    if self.wanted_names is None:
2541
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2542

    
2543
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2544
                             in self.wanted_names]
2545

    
2546
  def _EnsureChildSizes(self, disk):
2547
    """Ensure children of the disk have the needed disk size.
2548

2549
    This is valid mainly for DRBD8 and fixes an issue where the
2550
    children have smaller disk size.
2551

2552
    @param disk: an L{ganeti.objects.Disk} object
2553

2554
    """
2555
    if disk.dev_type == constants.LD_DRBD8:
2556
      assert disk.children, "Empty children for DRBD8?"
2557
      fchild = disk.children[0]
2558
      mismatch = fchild.size < disk.size
2559
      if mismatch:
2560
        self.LogInfo("Child disk has size %d, parent %d, fixing",
2561
                     fchild.size, disk.size)
2562
        fchild.size = disk.size
2563

    
2564
      # and we recurse on this child only, not on the metadev
2565
      return self._EnsureChildSizes(fchild) or mismatch
2566
    else:
2567
      return False
2568

    
2569
  def Exec(self, feedback_fn):
2570
    """Verify the size of cluster disks.
2571

2572
    """
2573
    # TODO: check child disks too
2574
    # TODO: check differences in size between primary/secondary nodes
2575
    per_node_disks = {}
2576
    for instance in self.wanted_instances:
2577
      pnode = instance.primary_node
2578
      if pnode not in per_node_disks:
2579
        per_node_disks[pnode] = []
2580
      for idx, disk in enumerate(instance.disks):
2581
        per_node_disks[pnode].append((instance, idx, disk))
2582

    
2583
    changed = []
2584
    for node, dskl in per_node_disks.items():
2585
      newl = [v[2].Copy() for v in dskl]
2586
      for dsk in newl:
2587
        self.cfg.SetDiskID(dsk, node)
2588
      result = self.rpc.call_blockdev_getsize(node, newl)
2589
      if result.fail_msg:
2590
        self.LogWarning("Failure in blockdev_getsize call to node"
2591
                        " %s, ignoring", node)
2592
        continue
2593
      if len(result.payload) != len(dskl):
2594
        logging.warning("Invalid result from node %s: len(dksl)=%d,"
2595
                        " result.payload=%s", node, len(dskl), result.payload)
2596
        self.LogWarning("Invalid result from node %s, ignoring node results",
2597
                        node)
2598
        continue
2599
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
2600
        if size is None:
2601
          self.LogWarning("Disk %d of instance %s did not return size"
2602
                          " information, ignoring", idx, instance.name)
2603
          continue
2604
        if not isinstance(size, (int, long)):
2605
          self.LogWarning("Disk %d of instance %s did not return valid"
2606
                          " size information, ignoring", idx, instance.name)
2607
          continue
2608
        size = size >> 20
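        # Editor's note (assumption, not original code): the payload is taken
        # to be a size in bytes, so shifting right by 20 converts it to MiB,
        # the unit used by disk.size.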
2609
        if size != disk.size:
2610
          self.LogInfo("Disk %d of instance %s has mismatched size,"
2611
                       " correcting: recorded %d, actual %d", idx,
2612
                       instance.name, disk.size, size)
2613
          disk.size = size
2614
          self.cfg.Update(instance, feedback_fn)
2615
          changed.append((instance.name, idx, size))
2616
        if self._EnsureChildSizes(disk):
2617
          self.cfg.Update(instance, feedback_fn)
2618
          changed.append((instance.name, idx, disk.size))
2619
    return changed
2620

    
2621

    
2622
class LUClusterRename(LogicalUnit):
2623
  """Rename the cluster.
2624

2625
  """
2626
  HPATH = "cluster-rename"
2627
  HTYPE = constants.HTYPE_CLUSTER
2628

    
2629
  def BuildHooksEnv(self):
2630
    """Build hooks env.
2631

2632
    """
2633
    env = {
2634
      "OP_TARGET": self.cfg.GetClusterName(),
2635
      "NEW_NAME": self.op.name,
2636
      }
2637
    mn = self.cfg.GetMasterNode()
2638
    all_nodes = self.cfg.GetNodeList()
2639
    return env, [mn], all_nodes
2640

    
2641
  def CheckPrereq(self):
2642
    """Verify that the passed name is a valid one.
2643

2644
    """
2645
    hostname = netutils.GetHostname(name=self.op.name,
2646
                                    family=self.cfg.GetPrimaryIPFamily())
2647

    
2648
    new_name = hostname.name
2649
    self.ip = new_ip = hostname.ip
2650
    old_name = self.cfg.GetClusterName()
2651
    old_ip = self.cfg.GetMasterIP()
2652
    if new_name == old_name and new_ip == old_ip:
2653
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
2654
                                 " cluster has changed",
2655
                                 errors.ECODE_INVAL)
2656
    if new_ip != old_ip:
2657
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2658
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
2659
                                   " reachable on the network" %
2660
                                   new_ip, errors.ECODE_NOTUNIQUE)
2661

    
2662
    self.op.name = new_name
2663

    
2664
  def Exec(self, feedback_fn):
2665
    """Rename the cluster.
2666

2667
    """
2668
    clustername = self.op.name
2669
    ip = self.ip
2670

    
2671
    # shutdown the master IP
2672
    master = self.cfg.GetMasterNode()
2673
    result = self.rpc.call_node_stop_master(master, False)
2674
    result.Raise("Could not disable the master role")
2675

    
2676
    try:
2677
      cluster = self.cfg.GetClusterInfo()
2678
      cluster.cluster_name = clustername
2679
      cluster.master_ip = ip
2680
      self.cfg.Update(cluster, feedback_fn)
2681

    
2682
      # update the known hosts file
2683
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2684
      node_list = self.cfg.GetOnlineNodeList()
2685
      try:
2686
        node_list.remove(master)
2687
      except ValueError:
2688
        pass
2689
      _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
2690
    finally:
2691
      result = self.rpc.call_node_start_master(master, False, False)
2692
      msg = result.fail_msg
2693
      if msg:
2694
        self.LogWarning("Could not re-enable the master role on"
2695
                        " the master, please restart manually: %s", msg)
2696

    
2697
    return clustername
2698

    
2699

    
2700
class LUClusterSetParams(LogicalUnit):
2701
  """Change the parameters of the cluster.
2702

2703
  """
2704
  HPATH = "cluster-modify"
2705
  HTYPE = constants.HTYPE_CLUSTER
2706
  REQ_BGL = False
2707

    
2708
  def CheckArguments(self):
2709
    """Check parameters
2710

2711
    """
2712
    if self.op.uid_pool:
2713
      uidpool.CheckUidPool(self.op.uid_pool)
2714

    
2715
    if self.op.add_uids:
2716
      uidpool.CheckUidPool(self.op.add_uids)
2717

    
2718
    if self.op.remove_uids:
2719
      uidpool.CheckUidPool(self.op.remove_uids)
2720

    
2721
  def ExpandNames(self):
2722
    # FIXME: in the future maybe other cluster params won't require checking on
2723
    # all nodes to be modified.
2724
    self.needed_locks = {
2725
      locking.LEVEL_NODE: locking.ALL_SET,
2726
    }
2727
    self.share_locks[locking.LEVEL_NODE] = 1
2728

    
2729
  def BuildHooksEnv(self):
2730
    """Build hooks env.
2731

2732
    """
2733
    env = {
2734
      "OP_TARGET": self.cfg.GetClusterName(),
2735
      "NEW_VG_NAME": self.op.vg_name,
2736
      }
2737
    mn = self.cfg.GetMasterNode()
2738
    return env, [mn], [mn]
2739

    
2740
  def CheckPrereq(self):
2741
    """Check prerequisites.
2742

2743
    This checks that the given parameters don't conflict and
    that the given volume group is valid.
2745

2746
    """
2747
    if self.op.vg_name is not None and not self.op.vg_name:
2748
      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
2749
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
2750
                                   " instances exist", errors.ECODE_INVAL)
2751

    
2752
    if self.op.drbd_helper is not None and not self.op.drbd_helper:
2753
      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
2754
        raise errors.OpPrereqError("Cannot disable drbd helper while"
2755
                                   " drbd-based instances exist",
2756
                                   errors.ECODE_INVAL)
2757

    
2758
    node_list = self.acquired_locks[locking.LEVEL_NODE]
2759

    
2760
    # if vg_name not None, checks given volume group on all nodes
2761
    if self.op.vg_name:
2762
      vglist = self.rpc.call_vg_list(node_list)
2763
      for node in node_list:
2764
        msg = vglist[node].fail_msg
2765
        if msg:
2766
          # ignoring down node
2767
          self.LogWarning("Error while gathering data on node %s"
2768
                          " (ignoring node): %s", node, msg)
2769
          continue
2770
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2771
                                              self.op.vg_name,
2772
                                              constants.MIN_VG_SIZE)
2773
        if vgstatus:
2774
          raise errors.OpPrereqError("Error on node '%s': %s" %
2775
                                     (node, vgstatus), errors.ECODE_ENVIRON)
2776

    
2777
    if self.op.drbd_helper:
2778
      # checks given drbd helper on all nodes
2779
      helpers = self.rpc.call_drbd_helper(node_list)
2780
      for node in node_list:
2781
        ninfo = self.cfg.GetNodeInfo(node)
2782
        if ninfo.offline:
2783
          self.LogInfo("Not checking drbd helper on offline node %s", node)
2784
          continue
2785
        msg = helpers[node].fail_msg
2786
        if msg:
2787
          raise errors.OpPrereqError("Error checking drbd helper on node"
2788
                                     " '%s': %s" % (node, msg),
2789
                                     errors.ECODE_ENVIRON)
2790
        node_helper = helpers[node].payload
2791
        if node_helper != self.op.drbd_helper:
2792
          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
2793
                                     (node, node_helper), errors.ECODE_ENVIRON)
2794

    
2795
    self.cluster = cluster = self.cfg.GetClusterInfo()
2796
    # validate params changes
2797
    if self.op.beparams:
2798
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2799
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
2800

    
2801
    if self.op.ndparams:
2802
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
2803
      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
2804

    
2805
      # TODO: we need a more general way to handle resetting
2806
      # cluster-level parameters to default values
2807
      if self.new_ndparams["oob_program"] == "":
2808
        self.new_ndparams["oob_program"] = \
2809
            constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
2810

    
2811
    if self.op.nicparams:
2812
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2813
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
2814
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
2815
      nic_errors = []
2816

    
2817
      # check all instances for consistency
2818
      for instance in self.cfg.GetAllInstancesInfo().values():
2819
        for nic_idx, nic in enumerate(instance.nics):
2820
          params_copy = copy.deepcopy(nic.nicparams)
2821
          params_filled = objects.FillDict(self.new_nicparams, params_copy)
2822

    
2823
          # check parameter syntax
2824
          try:
2825
            objects.NIC.CheckParameterSyntax(params_filled)
2826
          except errors.ConfigurationError, err:
2827
            nic_errors.append("Instance %s, nic/%d: %s" %
2828
                              (instance.name, nic_idx, err))
2829

    
2830
          # if we're moving instances to routed, check that they have an ip
2831
          target_mode = params_filled[constants.NIC_MODE]
2832
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2833
            nic_errors.append("Instance %s, nic/%d: routed nick with no ip" %
2834
                              (instance.name, nic_idx))
2835
      if nic_errors:
2836
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2837
                                   "\n".join(nic_errors))
2838

    
2839
    # hypervisor list/parameters
2840
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
2841
    if self.op.hvparams:
2842
      for hv_name, hv_dict in self.op.hvparams.items():
2843
        if hv_name not in self.new_hvparams:
2844
          self.new_hvparams[hv_name] = hv_dict
2845
        else:
2846
          self.new_hvparams[hv_name].update(hv_dict)
2847

    
2848
    # os hypervisor parameters
2849
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2850
    if self.op.os_hvp:
2851
      for os_name, hvs in self.op.os_hvp.items():
2852
        if os_name not in self.new_os_hvp:
2853
          self.new_os_hvp[os_name] = hvs
2854
        else:
2855
          for hv_name, hv_dict in hvs.items():
2856
            if hv_name not in self.new_os_hvp[os_name]:
2857
              self.new_os_hvp[os_name][hv_name] = hv_dict
2858
            else:
2859
              self.new_os_hvp[os_name][hv_name].update(hv_dict)
2860

    
2861
    # os parameters
2862
    self.new_osp = objects.FillDict(cluster.osparams, {})
2863
    if self.op.osparams:
2864
      for os_name, osp in self.op.osparams.items():
2865
        if os_name not in self.new_osp:
2866
          self.new_osp[os_name] = {}
2867

    
2868
        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
2869
                                                  use_none=True)
2870

    
2871
        if not self.new_osp[os_name]:
2872
          # we removed all parameters
2873
          del self.new_osp[os_name]
2874
        else:
2875
          # check the parameter validity (remote check)
2876
          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
2877
                         os_name, self.new_osp[os_name])
2878

    
2879
    # changes to the hypervisor list
2880
    if self.op.enabled_hypervisors is not None:
2881
      self.hv_list = self.op.enabled_hypervisors
2882
      for hv in self.hv_list:
2883
        # if the hypervisor doesn't already exist in the cluster
2884
        # hvparams, we initialize it to empty, and then (in both
2885
        # cases) we make sure to fill the defaults, as we might not
2886
        # have a complete defaults list if the hypervisor wasn't
2887
        # enabled before
2888
        if hv not in new_hvp:
2889
          new_hvp[hv] = {}
2890
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
2891
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
2892
    else:
2893
      self.hv_list = cluster.enabled_hypervisors
2894

    
2895
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
2896
      # either the enabled list has changed, or the parameters have, validate
2897
      for hv_name, hv_params in self.new_hvparams.items():
2898
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
2899
            (self.op.enabled_hypervisors and
2900
             hv_name in self.op.enabled_hypervisors)):
2901
          # either this is a new hypervisor, or its parameters have changed
2902
          hv_class = hypervisor.GetHypervisor(hv_name)
2903
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2904
          hv_class.CheckParameterSyntax(hv_params)
2905
          _CheckHVParams(self, node_list, hv_name, hv_params)
2906

    
2907
    if self.op.os_hvp:
2908
      # no need to check any newly-enabled hypervisors, since the
2909
      # defaults have already been checked in the above code-block
2910
      for os_name, os_hvp in self.new_os_hvp.items():
2911
        for hv_name, hv_params in os_hvp.items():
2912
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2913
          # we need to fill in the new os_hvp on top of the actual hv_p
2914
          cluster_defaults = self.new_hvparams.get(hv_name, {})
2915
          new_osp = objects.FillDict(cluster_defaults, hv_params)
2916
          hv_class = hypervisor.GetHypervisor(hv_name)
2917
          hv_class.CheckParameterSyntax(new_osp)
2918
          _CheckHVParams(self, node_list, hv_name, new_osp)
2919

    
2920
    if self.op.default_iallocator:
2921
      alloc_script = utils.FindFile(self.op.default_iallocator,
2922
                                    constants.IALLOCATOR_SEARCH_PATH,
2923
                                    os.path.isfile)
2924
      if alloc_script is None:
2925
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
2926
                                   " specified" % self.op.default_iallocator,
2927
                                   errors.ECODE_INVAL)
2928

    
2929
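  # Note: CheckPrereq above only validates the request and computes the new
  # parameter dictionaries (self.new_*); Exec below is what actually assigns
  # them to the cluster object and saves the configuration.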
  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.drbd_helper is not None:
      new_helper = self.op.drbd_helper
      if not new_helper:
        new_helper = None
      if new_helper != self.cfg.GetDRBDHelper():
        self.cfg.SetDRBDHelper(new_helper)
      else:
        feedback_fn("Cluster DRBD helper already in desired state,"
                    " not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.os_hvp:
      self.cluster.os_hvp = self.new_os_hvp
    if self.op.enabled_hypervisors is not None:
      self.cluster.hvparams = self.new_hvparams
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
    if self.op.osparams:
      self.cluster.osparams = self.new_osp
    if self.op.ndparams:
      self.cluster.ndparams = self.new_ndparams

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self, [])

    if self.op.maintain_node_health is not None:
      self.cluster.maintain_node_health = self.op.maintain_node_health

    if self.op.prealloc_wipe_disks is not None:
      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks

    if self.op.add_uids is not None:
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)

    if self.op.remove_uids is not None:
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)

    if self.op.uid_pool is not None:
      self.cluster.uid_pool = self.op.uid_pool

    if self.op.default_iallocator is not None:
      self.cluster.default_iallocator = self.op.default_iallocator

    if self.op.reserved_lvs is not None:
      self.cluster.reserved_lvs = self.op.reserved_lvs

    def helper_os(aname, mods, desc):
      desc += " OS list"
      lst = getattr(self.cluster, aname)
      for key, val in mods:
        if key == constants.DDM_ADD:
          if val in lst:
            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
          else:
            lst.append(val)
        elif key == constants.DDM_REMOVE:
          if val in lst:
            lst.remove(val)
          else:
            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
        else:
          raise errors.ProgrammerError("Invalid modification '%s'" % key)

    if self.op.hidden_os:
      helper_os("hidden_os", self.op.hidden_os, "hidden")

    if self.op.blacklisted_os:
      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")

    if self.op.master_netdev:
      master = self.cfg.GetMasterNode()
      feedback_fn("Shutting down master ip on the current netdev (%s)" %
                  self.cluster.master_netdev)
      result = self.rpc.call_node_stop_master(master, False)
      result.Raise("Could not disable the master ip")
      feedback_fn("Changing master_netdev from %s to %s" %
                  (self.cluster.master_netdev, self.op.master_netdev))
      self.cluster.master_netdev = self.op.master_netdev

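    # Everything modified above is saved by the single configuration update
    # below; the master IP is only restarted on the new netdev once the
    # updated configuration has been written.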
    self.cfg.Update(self.cluster, feedback_fn)

    if self.op.master_netdev:
      feedback_fn("Starting the master ip on the new master netdev (%s)" %
                  self.op.master_netdev)
      result = self.rpc.call_node_start_master(master, False, False)
      if result.fail_msg:
        self.LogWarning("Could not re-enable the master ip on"
                        " the master, please restart manually: %s",
                        result.fail_msg)


def _UploadHelper(lu, nodes, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(nodes, fname)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, to_node, msg))
        lu.proc.LogWarning(msg)


def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to
  @type additional_vm: boolean
  @param additional_vm: whether the additional nodes are vm-capable or not

  """
  # 1. Gather target nodes
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  dist_nodes = lu.cfg.GetOnlineNodeList()
  nvm_nodes = lu.cfg.GetNonVmCapableNodeList()
  vm_nodes = [name for name in dist_nodes if name not in nvm_nodes]
  if additional_nodes is not None:
    dist_nodes.extend(additional_nodes)
    if additional_vm:
      vm_nodes.extend(additional_nodes)
  if myself.name in dist_nodes:
    dist_nodes.remove(myself.name)
  if myself.name in vm_nodes:
    vm_nodes.remove(myself.name)

  # 2. Gather files to distribute
  dist_files = set([constants.ETC_HOSTS,
                    constants.SSH_KNOWN_HOSTS_FILE,
                    constants.RAPI_CERT_FILE,
                    constants.RAPI_USERS_FILE,
                    constants.CONFD_HMAC_KEY,
                    constants.CLUSTER_DOMAIN_SECRET_FILE,
                   ])
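  # Not every file listed above necessarily exists (the RAPI users file, for
  # example, may never have been created); _UploadHelper only copies files
  # that are actually present on the master.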

  vm_files = set()
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
  for hv_name in enabled_hypervisors:
    hv_class = hypervisor.GetHypervisor(hv_name)
    vm_files.update(hv_class.GetAncillaryFiles())

  # 3. Perform the files upload
  for fname in dist_files:
    _UploadHelper(lu, dist_nodes, fname)
  for fname in vm_files:
    _UploadHelper(lu, vm_nodes, fname)


class LUClusterRedistConf(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
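    # Updating the (otherwise unchanged) cluster object makes ConfigWriter
    # push the configuration and ssconf files to all nodes; the remaining
    # ancillary files are then copied explicitly.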
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    _RedistributeAncillaryFiles(self)


def _WaitForSync(lu, instance, disks=None, oneshot=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks or disks is not None and not disks:
    return True

  disks = _ExpandCheckDisks(instance, disks)

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in disks:
    lu.cfg.SetDiskID(dev, node)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
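  # Polling behaviour of the loop below: RPC failures are retried up to ten
  # times with a six-second pause, a successful poll sleeps for at most 60
  # seconds (bounded by the largest estimated sync time), and a sync that
  # completes in degraded state is re-checked up to ten more times at
  # one-second intervals before the final status is returned.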
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = ("%s remaining (estimated)" %
                      utils.FormatSeconds(mstat.estimated_time))
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (disks[i].iv_name, mstat.sync_percent, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

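  # Child devices are checked recursively; note that the recursion below
  # always uses the default is_degraded test, even when this call was made
  # with ldisk=True.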
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUOobCommand(NoHooksLU):
  """Logical unit for OOB handling.

  """
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - OOB is supported

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.nodes = []
    for node_name in self.op.node_names:
      node = self.cfg.GetNodeInfo(node_name)

      if node is None:
        raise errors.OpPrereqError("Node %s not found" % node_name,
                                   errors.ECODE_NOENT)
      else:
        self.nodes.append(node)

      if (self.op.command == constants.OOB_POWER_OFF and not node.offline):
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                    " not marked offline") % node_name,
                                   errors.ECODE_STATE)

  def ExpandNames(self):
    """Gather locks we need.

    """
    if self.op.node_names:
      self.op.node_names = [_ExpandNodeName(self.cfg, name)
                            for name in self.op.node_names]
    else:
      self.op.node_names = self.cfg.GetNodeList()

    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_names,
      }

  def Exec(self, feedback_fn):
    """Execute OOB and return result if we expect any.

    """
    master_node = self.cfg.GetMasterNode()
    ret = []

    for node in self.nodes:
      node_entry = [(constants.RS_NORMAL, node.name)]
      ret.append(node_entry)

      oob_program = _SupportsOob(self.cfg, node)

      if not oob_program:
        node_entry.append((constants.RS_UNAVAIL, None))
        continue

      logging.info("Executing out-of-band command '%s' using '%s' on %s",
                   self.op.command, oob_program, node.name)
      result = self.rpc.call_run_oob(master_node, oob_program,
                                     self.op.command, node.name,
                                     self.op.timeout)

      if result.fail_msg:
        self.LogWarning("On node '%s' out-of-band RPC failed with: %s",
                        node.name, result.fail_msg)
        node_entry.append((constants.RS_NODATA, None))
      else:
        try:
          self._CheckPayload(result)
        except errors.OpExecError, err:
          self.LogWarning("The payload returned by '%s' is not valid: %s",
                          node.name, err)
          node_entry.append((constants.RS_NODATA, None))
        else:
          if self.op.command == constants.OOB_HEALTH:
            # For health we should log important events
            for item, status in result.payload:
              if status in [constants.OOB_STATUS_WARNING,
                            constants.OOB_STATUS_CRITICAL]:
                self.LogWarning("On node '%s' item '%s' has status '%s'",
                                node.name, item, status)

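          # Keep the recorded power state in sync with the command that was
          # just executed; for a pure power-status query we only warn when
          # the recorded and the reported state disagree.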
          if self.op.command == constants.OOB_POWER_ON:
            node.powered = True
          elif self.op.command == constants.OOB_POWER_OFF:
            node.powered = False
          elif self.op.command == constants.OOB_POWER_STATUS:
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
            if powered != node.powered:
              logging.warning(("Recorded power state (%s) of node '%s' does not"
                               " match actual power state (%s)"), node.powered,
                              node.name, powered)

          # For configuration changing commands we should update the node
          if self.op.command in (constants.OOB_POWER_ON,
                                 constants.OOB_POWER_OFF):
            self.cfg.Update(node, feedback_fn)

          node_entry.append((constants.RS_NORMAL, result.payload))

    return ret

  def _CheckPayload(self, result):
    """Checks if the payload is valid.

    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid

    """
    errs = []
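    # A "health" payload is expected to be a list of (item, status) pairs,
    # for example [("fan1", constants.OOB_STATUS_WARNING)] (the item name is
    # purely illustrative); every status must be in constants.OOB_STATUSES.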
    if self.op.command == constants.OOB_HEALTH:
      if not isinstance(result.payload, list):
        errs.append("command 'health' is expected to return a list but got %s" %
                    type(result.payload))
      else:
        for item, status in result.payload:
          if status not in constants.OOB_STATUSES:
            errs.append("health item '%s' has invalid status '%s'" %
                        (item, status))

    if self.op.command == constants.OOB_POWER_STATUS:
      if not isinstance(result.payload, dict):
        errs.append("power-status is expected to return a dict but got %s" %
                    type(result.payload))

    if self.op.command in [
        constants.OOB_POWER_ON,
        constants.OOB_POWER_OFF,
        constants.OOB_POWER_CYCLE,
        ]:
      if result.payload is not None:
        errs.append("%s is expected to not return payload but got '%s'" %
                    (self.op.command, result.payload))

    if errs:
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                               utils.CommaJoin(errs))


class LUOsDiagnose(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  REQ_BGL = False
  _HID = "hidden"
  _BLK = "blacklisted"
  _VLD = "valid"
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", _VLD, "node_status", "variants",
                                   "parameters", "api_versions", _HID, _BLK)

  def CheckArguments(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported",
                                 errors.ECODE_INVAL)

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  @staticmethod
  def _DiagnoseByOS(rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another
        map, with nodes as keys and tuples of (path, status, diagnose,
        variants, parameters, api_versions) as values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "", [], [])]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for (name, path, status, diagnose, variants,
           params, api_versions) in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        # convert params from [name, help] to (name, help)
        params = [tuple(v) for v in params]
        all_os[name][node_name].append((path, status, diagnose,
                                        variants, params, api_versions))
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    valid_nodes = [node.name
                   for node in self.cfg.GetAllNodesInfo().values()
                   if not node.offline and node.vm_capable]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    pol = self._DiagnoseByOS(node_data)
    output = []
    cluster = self.cfg.GetClusterInfo()

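    # An OS is reported as valid only if its first entry is valid on every
    # node; the variants, parameters and API versions are reduced to the
    # intersection of what all nodes agree on.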
    for os_name in utils.NiceSort(pol.keys()):
      os_data = pol[os_name]
      row = []
      valid = True
      (variants, params, api_versions) = null_state = (set(), set(), set())
      for idx, osl in enumerate(os_data.values()):
        valid = bool(valid and osl and osl[0][1])
        if not valid:
          (variants, params, api_versions) = null_state
          break
        node_variants, node_params, node_api = osl[0][3:6]
        if idx == 0: # first entry
          variants = set(node_variants)
          params = set(node_params)
          api_versions = set(node_api)
        else: # keep consistency
          variants.intersection_update(node_variants)
          params.intersection_update(node_params)
          api_versions.intersection_update(node_api)

      is_hid = os_name in cluster.hidden_os
      is_blk = os_name in cluster.blacklisted_os
      if ((self._HID not in self.op.output_fields and is_hid) or
          (self._BLK not in self.op.output_fields and is_blk) or
          (self._VLD not in self.op.output_fields and not valid)):
        continue

      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == self._VLD:
          val = valid
        elif field == "node_status":
          # this is just a copy of the dict
          val = {}
          for node_name, nos_list in os_data.items():
            val[node_name] = nos_list
        elif field == "variants":
          val = utils.NiceSort(list(variants))
        elif field == "parameters":
          val = list(params)
        elif field == "api_versions":
          val = list(api_versions)
        elif field == self._HID:
          val = is_hid
        elif field == self._BLK:
          val = is_blk
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LUNodeRemove(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    try:
      all_nodes.remove(self.op.node_name)
    except ValueError:
      logging.warning("Node %s which is about to be removed not found"
                      " in the all nodes list", self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    node = self.cfg.GetNodeInfo(self.op.node_name)
    assert node is not None

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.",
                                 errors.ECODE_INVAL)

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name,
                                   errors.ECODE_INVAL)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self, exceptions=[node.name])
    self.context.RemoveNode(node.name)

    # Run post hooks on the node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % node.name)

    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Errors encountered on the remote node while leaving"
                      " the cluster: %s", msg)

    # Remove node from our /etc/hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_REMOVE,
                                              node.name, None)
      result.Raise("Can't update hosts file with new host data")
      _RedistributeAncillaryFiles(self)


class _NodeQuery(_QueryBase):
  FIELDS = query.NODE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks[locking.LEVEL_NODE] = 1

    if self.names:
      self.wanted = _GetWantedNodes(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.NQ_LIVE in self.requested_data)

    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      lu.needed_locks[locking.LEVEL_NODE] = self.wanted

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of nodes and their attributes.

    """
    all_info = lu.cfg.GetAllNodesInfo()

    nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)

    # Gather data as requested
    if query.NQ_LIVE in self.requested_data:
      # filter out non-vm_capable nodes
      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]

      node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
                                        lu.cfg.GetHypervisorType())
      live_data = dict((name, nresult.payload)
                       for (name, nresult) in node_data.items()
                       if not nresult.fail_msg and nresult.payload)
    else:
      live_data = None

    if query.NQ_INST in self.requested_data:
      node_to_primary = dict([(name, set()) for name in nodenames])
      node_to_secondary = dict([(name, set()) for name in nodenames])

      inst_data = lu.cfg.GetAllInstancesInfo()

      for inst in inst_data.values():
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)
    else:
      node_to_primary = None
      node_to_secondary = None

    if query.NQ_OOB in self.requested_data:
      oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
                         for name, node in all_info.iteritems())
    else:
      oob_support = None

    if query.NQ_GROUP in self.requested_data:
      groups = lu.cfg.GetAllNodeGroupsInfo()
    else:
      groups = {}

    return query.NodeQueryData([all_info[name] for name in nodenames],
                               live_data, lu.cfg.GetMasterNode(),
                               node_to_primary, node_to_secondary, groups,
                               oob_support, lu.cfg.GetClusterInfo())


class LUNodeQuery(NoHooksLU):
  """Logical unit for querying nodes.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.nq = _NodeQuery(self.op.names, self.op.output_fields,
                         self.op.use_locking)

  def ExpandNames(self):
    self.nq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.nq.OldStyleQuery(self)


class LUNodeQueryvols(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.acquired_locks[locking.LEVEL_NODE]
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = nresult.payload[:]
      node_vols.sort(key=lambda vol: vol['dev'])

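      # The "instance" field below relies on Python's for/else: when no
      # instance owns the logical volume on this node, the else branch sets
      # the value to "-".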
      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


class LUNodeQueryStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
  REQ_BGL = False

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result


class _InstanceQuery(_QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks[locking.LEVEL_INSTANCE] = 1
    lu.share_locks[locking.LEVEL_NODE] = 1

    if self.names:
      self.wanted = _GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
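    # Locks are only taken when live data was requested together with
    # locking; the node lock set starts out empty and is narrowed down to the
    # instances' nodes in DeclareLocks via _LockInstancesNodes.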
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, lu, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      lu._LockInstancesNodes() # pylint: disable-msg=W0212

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    cluster = lu.cfg.GetClusterInfo()
    all_info = lu.cfg.GetAllInstancesInfo()

    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)

    instance_list = [all_info[name] for name in instance_names]
    nodes = frozenset(itertools.chain(*(inst.all_nodes
                                        for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_nodes = []
    offline_nodes = []
    wrongnode_inst = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if inst in all_info:
              if all_info[inst].primary_node == name:
                live_data.update(result.payload)
              else:
                wrongnode_inst.add(inst)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst, name)
        # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      disk_usage = dict((inst.name,
                         _ComputeDiskSize(inst.disk_template,
                                          [{"size": disk.size}
                                           for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_nodes, bad_nodes,
                                   live_data, wrongnode_inst, consinfo)


class LUQuery(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    qcls = _GetQueryImplementation(self.op.what)
    names = qlang.ReadSimpleFilter("name", self.op.filter)

    self.impl = qcls(names, self.op.fields, False)

  def ExpandNames(self):
    self.impl.ExpandNames(self)

  def DeclareLocks(self, level):
    self.impl.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.impl.NewStyleQuery(self)


class LUQueryFields(NoHooksLU):
  """Query for the fields of resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.qcls = _GetQueryImplementation(self.op.what)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    return self.qcls.FieldsQuery(self.op.fields)


class LUNodeModifyStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def Exec(self, feedback_fn):
    """Modifies a storage volume on a node.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUNodeAdd(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _NFLAGS = ["master_capable", "vm_capable"]

  def CheckArguments(self):
    self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
    # validate/normalize the node name
    self.hostname = netutils.GetHostname(name=self.op.node_name,
                                         family=self.primary_ip_family)
    self.op.node_name = self.hostname.name
    if self.op.readd and self.op.group:
      raise errors.OpPrereqError("Cannot pass a node group when a node is"
                                 " being readded", errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    cfg = self.cfg
    hostname = self.hostname
    node = hostname.name
    primary_ip = self.op.primary_ip = hostname.ip
    if self.op.secondary_ip is None:
      if self.primary_ip_family == netutils.IP6Address.family:
        raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
                                   " IPv4 address must be given as secondary",