
root / lib / cmdlib.py @ eb630f50


#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd

import ganeti.masterd.instance # pylint: disable-msg=W0611


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by the opcode's dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name, errors.ECODE_INVAL)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity of the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing it separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer have to worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_' as this will
    be handled in the hooks runner. Also note that additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged, but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused-argument and
    # could-be-a-function warnings
    # pylint: disable-msg=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
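
  # Illustrative sketch (not part of the original file): a typical
  # instance-level LU combines this helper with _LockInstancesNodes below:
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()
  #     self.needed_locks[locking.LEVEL_NODE] = []
  #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
  #
  #   def DeclareLocks(self, level):
  #     if level == locking.LEVEL_NODE:
  #       self._LockInstancesNodes()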

    
  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]
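
# Illustrative sketch (not part of the original file): a minimal concurrent
# LU following the rules documented in LogicalUnit. The class name, hook
# path and opcode are hypothetical.
#
#   class LUExampleNoop(LogicalUnit):
#     HPATH = "example-noop"
#     HTYPE = constants.HTYPE_CLUSTER
#     _OP_REQP = []
#     REQ_BGL = False
#
#     def ExpandNames(self):
#       self.needed_locks = {}  # no locks needed
#
#     def BuildHooksEnv(self):
#       return ({"OP_TARGET": self.cfg.GetClusterName()}, [], [])
#
#     def CheckPrereq(self):
#       pass  # no cluster state to verify
#
#     def Exec(self, feedback_fn):
#       feedback_fn("nothing to do")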

    
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    assert False, "BuildHooksEnv called for NoHooksLUs"


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError
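
# Illustrative sketch (not part of the original file): an LU can delegate its
# work to tasklets by populating self.tasklets in ExpandNames; the base
# LogicalUnit.CheckPrereq and Exec then iterate over them. The class names
# below are hypothetical.
#
#   class _ExampleTasklet(Tasklet):
#     def CheckPrereq(self):
#       pass  # verify preconditions via self.cfg / self.rpc
#
#     def Exec(self, feedback_fn):
#       feedback_fn("tasklet body runs here")
#
#   # inside some LU's ExpandNames:
#   #   self.tasklets = [_ExampleTasklet(self)]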

    
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names; must not be empty
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is of a wrong type
  @raise errors.ProgrammerError: if the nodes parameter is empty

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
                               errors.ECODE_INVAL)

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is of a wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'",
                               errors.ECODE_INVAL)

  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @type selected: list
  @param selected: the list of fields selected by the caller
  @raise errors.OpPrereqError: if any selected field is not recognized

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)
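
# Illustrative sketch (not part of the original file): a query-type LU would
# typically validate the caller's requested fields like this (the field names
# are hypothetical):
#
#   _CheckOutputFields(static=utils.FieldSet("name", "pinst_cnt"),
#                      dynamic=utils.FieldSet("dtotal", "dfree"),
#                      selected=self.op.output_fields)
#
# Any selected field outside the two sets raises OpPrereqError/ECODE_INVAL.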

    
def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)), errors.ECODE_INVAL)
  setattr(op, name, val)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node does not support the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _RequireFileStorage():
  """Checks that file storage is enabled.

  @raise errors.OpPrereqError: when file storage is disabled

  """
  if not constants.ENABLE_FILE_STORAGE:
    raise errors.OpPrereqError("File storage disabled at configure time",
                               errors.ECODE_INVAL)


def _CheckDiskTemplate(template):
  """Ensure a given disk template is valid.

  """
  if template not in constants.DISK_TEMPLATES:
    msg = ("Invalid disk template name '%s', valid templates are: %s" %
           (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
  if template == constants.DT_FILE:
    _RequireFileStorage()


def _CheckStorageType(storage_type):
  """Ensure a given storage type is valid.

  """
  if storage_type not in constants.VALID_STORAGE_TYPES:
    raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
                               errors.ECODE_INVAL)
  if storage_type == constants.ST_FILE:
    _RequireFileStorage()


def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instances."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance-related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
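
# Illustrative sketch (not part of the original file): for a hypothetical
# single-NIC, single-disk instance that is up, the function above would
# produce, among others, keys like:
#
#   INSTANCE_NAME=inst1.example.com    INSTANCE_STATUS=up
#   INSTANCE_NIC_COUNT=1               INSTANCE_NIC0_MAC=aa:00:00:11:22:33
#   INSTANCE_DISK_COUNT=1              INSTANCE_DISK0_SIZE=10240
#
# The hooks runner later prefixes each key with GANETI_ before exporting it
# to the hook scripts (see BuildHooksEnv above).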

    
def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance-related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max by one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should
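
# Illustrative worked example (not part of the original file): with a
# hypothetical candidate_pool_size of 10 and mc_now == mc_should == 3,
# adding the new node gives mc_should = min(3 + 1, 10) = 4; since 3 < 4,
# _DecideSelfPromotion returns True and the node promotes itself.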

    
def _CheckNicsBridgesExist(lu, target_nics, target_node,
                               profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
                for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  try:
    variant = name.split("+", 1)[1]
  except IndexError:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
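
# Illustrative sketch (not part of the original file): variants are encoded
# in the OS name after a '+'. For a hypothetical OS "debootstrap" with
# supported_variants == ["squeeze", "lenny"]:
#
#   _CheckOSVariant(os_obj, "debootstrap+squeeze")  # accepted
#   _CheckOSVariant(os_obj, "debootstrap")          # raises: missing variant
#   _CheckOSVariant(os_obj, "debootstrap+etch")     # raises: unsupported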

    
def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  """Returns the indices of an instance's faulty disks on a node.

  """
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


class LUPostInitCluster(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def CheckPrereq(self):
    """No prerequisites to check.

    """
    return True

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUDestroyCluster(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    if modify_ssh_setup:
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      utils.CreateBackup(priv_key)
      utils.CreateBackup(pub_key)

    return master


def _VerifyCertificate(filename):
  """Verifies a certificate for LUVerifyCluster.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable-msg=W0703
    return (LUVerifyCluster.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  (errcode, msg) = \
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
                                constants.SSL_CERT_EXPIRATION_ERROR)

  if msg:
    fnamemsg = "While verifying %s: %s" % (filename, msg)
  else:
    fnamemsg = None

  if errcode is None:
    return (None, fnamemsg)
  elif errcode == utils.CERT_WARNING:
    return (LUVerifyCluster.ETYPE_WARNING, fnamemsg)
  elif errcode == utils.CERT_ERROR:
    return (LUVerifyCluster.ETYPE_ERROR, fnamemsg)

  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
  REQ_BGL = False

  TCLUSTER = "cluster"
  TNODE = "node"
  TINSTANCE = "instance"

  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
  ENODEDRBD = (TNODE, "ENODEDRBD")
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
  ENODEHV = (TNODE, "ENODEHV")
  ENODELVM = (TNODE, "ENODELVM")
  ENODEN1 = (TNODE, "ENODEN1")
  ENODENET = (TNODE, "ENODENET")
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
  ENODERPC = (TNODE, "ENODERPC")
  ENODESSH = (TNODE, "ENODESSH")
  ENODEVERSION = (TNODE, "ENODEVERSION")
  ENODESETUP = (TNODE, "ENODESETUP")
  ENODETIME = (TNODE, "ENODETIME")

  ETYPE_FIELD = "code"
  ETYPE_ERROR = "ERROR"
  ETYPE_WARNING = "WARNING"

  class NodeImage(object):
    """A class representing the logical and physical status of a node.

    @ivar volumes: a structure as returned from
        L{ganeti.backend.GetVolumeList} (runtime)
    @ivar instances: a list of running instances (runtime)
    @ivar pinst: list of configured primary instances (config)
    @ivar sinst: list of configured secondary instances (config)
    @ivar sbp: dict of {primary-node: list of instances} for all instances
        for which this node is secondary (config)
    @ivar mfree: free memory, as reported by the hypervisor (runtime)
    @ivar dfree: free disk, as reported by the node (runtime)
    @ivar offline: the offline status (config)
    @type rpc_fail: boolean
    @ivar rpc_fail: whether the RPC verify call failed (overall,
        not whether the individual keys were correct) (runtime)
    @type lvm_fail: boolean
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
    @type hyp_fail: boolean
    @ivar hyp_fail: whether the RPC call didn't return the instance list
    @type ghost: boolean
    @ivar ghost: whether this is a known node or not (config)

    """
    def __init__(self, offline=False):
      self.volumes = {}
      self.instances = []
      self.pinst = []
      self.sinst = []
      self.sbp = {}
      self.mfree = 0
      self.dfree = 0
      self.offline = offline
      self.rpc_fail = False
      self.lvm_fail = False
      self.hyp_fail = False
      self.ghost = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def _Error(self, ecode, item, msg, *args, **kwargs):
    """Format an error message.

    Based on the opcode's error_codes parameter, either format a
    parseable error code, or a simpler error string.

    This must be called only from Exec and functions called from Exec.

    """
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
    itype, etxt = ecode
    # first complete the msg
    if args:
      msg = msg % args
    # then format the whole message
    if self.op.error_codes:
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
    else:
      if item:
        item = " " + item
      else:
        item = ""
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
    # and finally report it via the feedback_fn
    self._feedback_fn("  - %s" % msg)

  def _ErrorIf(self, cond, *args, **kwargs):
    """Log an error message if the passed condition is True.

    """
    cond = bool(cond) or self.op.debug_simulate_errors
    if cond:
      self._Error(*args, **kwargs)
    # do not mark the operation as failed for WARN cases only
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
      self.bad = self.bad or cond

  def _VerifyNode(self, ninfo, nresult):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the results from the node
    @rtype: boolean
    @return: whether overall this call was successful (and we can expect
         reasonable values in the response)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # main result, nresult should be a non-empty dict
    test = not nresult or not isinstance(nresult, dict)
    _ErrorIf(test, self.ENODERPC, node,
                  "unable to verify node: no data returned")
    if test:
      return False

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = nresult.get("version", None)
    test = not (remote_version and
                isinstance(remote_version, (list, tuple)) and
                len(remote_version) == 2)
    _ErrorIf(test, self.ENODERPC, node,
             "connection to node returned invalid data")
    if test:
      return False

    test = local_version != remote_version[0]
    _ErrorIf(test, self.ENODEVERSION, node,
             "incompatible protocol versions: master %s,"
             " node %s", local_version, remote_version[0])
    if test:
      return False

    # node seems compatible, we can actually try to look into its results

    # full package version
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
                  self.ENODEVERSION, node,
                  "software version mismatch: master %s, node %s",
                  constants.RELEASE_VERSION, remote_version[1],
                  code=self.ETYPE_WARNING)

    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        test = hv_result is not None
        _ErrorIf(test, self.ENODEHV, node,
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)

    test = nresult.get(constants.NV_NODESETUP,
                           ["Missing NODESETUP results"])
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
             "; ".join(test))

    return True

  def _VerifyNodeTime(self, ninfo, nresult,
                      nvinfo_starttime, nvinfo_endtime):
    """Check the node time.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nvinfo_starttime: the start time of the RPC call
    @param nvinfo_endtime: the end time of the RPC call

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    ntime = nresult.get(constants.NV_TIME, None)
    try:
      ntime_merged = utils.MergeTime(ntime)
    except (ValueError, TypeError):
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
      return

    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
    else:
      ntime_diff = None

    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
             "Node time diverges by at least %s from master node time",
             ntime_diff)

  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
    """Check the node LVM data.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param vg_name: the configured VG name

    """
    if vg_name is None:
      return

    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # checks vg existence and size > 20G
    vglist = nresult.get(constants.NV_VGLIST, None)
    test = not vglist
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
    if not test:
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                            constants.MIN_VG_SIZE)
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)

    # check pv names
    pvlist = nresult.get(constants.NV_PVLIST, None)
    test = pvlist is None
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
    if not test:
      # check that ':' is not present in PV names, since it's a
      # special character for lvcreate (denotes the range of PEs to
      # use on the PV)
      for _, pvname, owner_vg in pvlist:
        test = ":" in pvname
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
                 " '%s' of VG '%s'", pvname, owner_vg)

  def _VerifyNodeNetwork(self, ninfo, nresult):
    """Check the node network connectivity.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    test = constants.NV_NODELIST not in nresult
    _ErrorIf(test, self.ENODESSH, node,
             "node hasn't returned node ssh connectivity data")
    if not test:
      if nresult[constants.NV_NODELIST]:
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
          _ErrorIf(True, self.ENODESSH, node,
                   "ssh communication with node '%s': %s", a_node, a_msg)

    test = constants.NV_NODENETTEST not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node tcp connectivity data")
    if not test:
      if nresult[constants.NV_NODENETTEST]:
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
        for anode in nlist:
          _ErrorIf(True, self.ENODENET, node,
                   "tcp communication with node '%s': %s",
                   anode, nresult[constants.NV_NODENETTEST][anode])

    test = constants.NV_MASTERIP not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node master IP reachability data")
    if not test:
      if not nresult[constants.NV_MASTERIP]:
        if node == self.master_node:
          msg = "the master node cannot reach the master IP (not configured?)"
        else:
          msg = "cannot reach the master IP"
        _ErrorIf(True, self.ENODENET, node, msg)

  def _VerifyInstance(self, instance, instanceconfig, node_image):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      n_img = node_image[node]
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # ignore missing volumes on offline or broken nodes
        continue
      for volume in node_vol_should[node]:
        test = volume not in n_img.volumes
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
                 "volume %s missing on node %s", volume, node)

    if instanceconfig.admin_up:
      pri_img = node_image[node_current]
      test = instance not in pri_img.instances and not pri_img.offline
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
               "instance not running on its primary node %s",
               node_current)

    for node, n_img in node_image.items():
      if node != node_current:
        test = instance in n_img.instances
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
                 "instance should not run on node %s", node)

  def _VerifyOrphanVolumes(self, node_vol_should, node_image):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    for node, n_img in node_image.items():
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # skip non-healthy nodes
        continue
      for volume in n_img.volumes:
        test = (node not in node_vol_should or
                volume not in node_vol_should[node])
        self._ErrorIf(test, self.ENODEORPHANLV, node,
                      "volume %s is unknown", volume)

  def _VerifyOrphanInstances(self, instancelist, node_image):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    for node, n_img in node_image.items():
      for o_inst in n_img.instances:
        test = o_inst not in instancelist
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
                      "instance %s on node %s should not exist", o_inst, node)

  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the
    instances it was primary for.

    """
    for node, n_img in node_image.items():
      # This code checks that every node which is now listed as
      # secondary has enough memory to host all instances it is
      # supposed to, should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well
      # as up ones, considering that even if they're down someone
      # might want to start them even in the event of a node failure.
      for prinode, instances in n_img.sbp.items():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        test = n_img.mfree < needed_mem
        self._ErrorIf(test, self.ENODEN1, node,
                      "not enough memory to accommodate instance"
                      " failovers should peer node %s fail", prinode)

  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
                       master_files):
    """Verifies and computes the node required file checksums.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param master_files: list of files that only masters should have

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_cksum = nresult.get(constants.NV_FILELIST, None)
    test = not isinstance(remote_cksum, dict)
    _ErrorIf(test, self.ENODEFILECHECK, node,
             "node hasn't returned file checksum data")
    if test:
      return

    for file_name in file_list:
      node_is_mc = ninfo.master_candidate
      must_have = (file_name not in master_files) or node_is_mc
      # missing
      test1 = file_name not in remote_cksum
      # invalid checksum
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
      # existing and good
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
               "file '%s' missing", file_name)
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
               "file '%s' has wrong checksum", file_name)
      # not candidate and this is not a must-have file
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist on non master"
               " candidates (and the file is outdated)", file_name)
      # all good, except non-master/non-must have combination
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist"
               " on non master candidates", file_name)
1497
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_map):
    """Verifies the node DRBD status.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param instanceinfo: the dict of instances
    @param drbd_map: the DRBD map as returned by
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # compute the DRBD minors
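    # drbd_map[node] maps minor numbers to instance names, e.g.
    # {0: "inst1.example.com", 1: "inst2.example.com"} (illustrative
    # values only); node_drbd rebuilds it below as
    # {minor: (instance_name, should_be_active)}.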
    node_drbd = {}
    for minor, instance in drbd_map[node].items():
      test = instance not in instanceinfo
      _ErrorIf(test, self.ECLUSTERCFG, None,
               "ghost instance '%s' in temporary DRBD map", instance)
      # ghost instance should not be running, but otherwise we
      # don't give double warnings (both ghost instance and
      # unallocated minor in use)
      if test:
        node_drbd[minor] = (instance, False)
      else:
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)

    # and now check them
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
    test = not isinstance(used_minors, (tuple, list))
    _ErrorIf(test, self.ENODEDRBD, node,
             "cannot parse drbd status file: %s", str(used_minors))
    if test:
      # we cannot check drbd status
      return

    for minor, (iname, must_exist) in node_drbd.items():
      test = minor not in used_minors and must_exist
      _ErrorIf(test, self.ENODEDRBD, node,
               "drbd minor %d of instance %s is not active", minor, iname)
    for minor in used_minors:
      test = minor not in node_drbd
      _ErrorIf(test, self.ENODEDRBD, node,
               "unallocated drbd minor %d is in use", minor)

  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
    """Verifies and updates the node volume data.

    This function will update a L{NodeImage}'s internal structures
    with data from the remote call.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # assume failure until the LV data parses cleanly
    nimg.lvm_fail = True
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
    if vg_name is None:
      pass
    elif isinstance(lvdata, basestring):
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
               utils.SafeEncode(lvdata))
    elif not isinstance(lvdata, dict):
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
    else:
      nimg.volumes = lvdata
      nimg.lvm_fail = False

  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
    """Verifies and updates the node instance list.

    If the listing was successful, then updates this node's instance
    list. Otherwise, it marks the RPC call as failed for the instance
    list key.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    idata = nresult.get(constants.NV_INSTANCELIST, None)
    test = not isinstance(idata, list)
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
    if test:
      nimg.hyp_fail = True
    else:
      nimg.instances = idata

  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
    """Verifies and computes a node information map.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # try to read free memory (from the hypervisor)
    hv_info = nresult.get(constants.NV_HVINFO, None)
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
    if not test:
      try:
        nimg.mfree = int(hv_info["memory_free"])
      except (ValueError, TypeError):
        _ErrorIf(True, self.ENODERPC, node,
                 "node returned invalid nodeinfo, check hypervisor")

    # FIXME: devise a free space model for file based instances as well
    if vg_name is not None:
      test = (constants.NV_VGLIST not in nresult or
              vg_name not in nresult[constants.NV_VGLIST])
      _ErrorIf(test, self.ENODELVM, node,
               "node didn't return data for the volume group '%s'"
               " - it is either missing or broken", vg_name)
      if not test:
        try:
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
        except (ValueError, TypeError):
          _ErrorIf(True, self.ENODERPC, node,
                   "node returned invalid LVM info, check LVM status")

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run in the post phase; if they fail, their
    output is logged in the verify output and the verification fails.

    """
    all_nodes = self.cfg.GetNodeList()
    env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
      }
    for node in self.cfg.GetAllNodesInfo().values():
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())

    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    self.bad = False
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    verbose = self.op.verbose
    self._feedback_fn = feedback_fn
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)

    # Check the cluster certificates
    for cert_filename in constants.ALL_CERT_FILES:
      (errcode, msg) = _VerifyCertificate(cert_filename)
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    cluster = self.cfg.GetClusterInfo()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = 0 # Count of offline nodes
    n_drained = 0 # Count of nodes being drained
    node_vol_should = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]
    master_node = self.master_node = self.cfg.GetMasterNode()
    master_ip = self.cfg.GetMasterIP()

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.extend(constants.ALL_CERT_FILES)
    file_names.extend(master_files)
    if cluster.modify_etc_hosts:
      file_names.append(constants.ETC_HOSTS)

    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_NODESETUP: None,
      constants.NV_TIME: None,
      constants.NV_MASTERIP: (master_node, master_ip),
      }

    if vg_name is not None:
      node_verify_param[constants.NV_VGLIST] = None
      node_verify_param[constants.NV_LVLIST] = vg_name
      node_verify_param[constants.NV_PVLIST] = [vg_name]
      node_verify_param[constants.NV_DRBDLIST] = None

    # Build our expected cluster state
    node_image = dict((node.name, self.NodeImage(offline=node.offline))
                      for node in nodeinfo)

    for instance in instancelist:
      inst_config = instanceinfo[instance]

      for nname in inst_config.all_nodes:
        if nname not in node_image:
          # ghost node
          gnode = self.NodeImage()
          gnode.ghost = True
          node_image[nname] = gnode

      inst_config.MapLVsByNode(node_vol_should)

      pnode = inst_config.primary_node
      node_image[pnode].pinst.append(instance)

      for snode in inst_config.secondary_nodes:
        nimg = node_image[snode]
        nimg.sinst.append(instance)
        if pnode not in nimg.sbp:
          nimg.sbp[pnode] = []
        nimg.sbp[pnode].append(instance)
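        # Illustrative shape (not from the source): for a node that is
        # secondary to two instances whose primary is node1, nimg.sbp
        # ends up as {"node1.example.com": ["inst1", "inst2"]}; the
        # N+1 memory check above walks exactly this map.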

    # At this point, we have the in-memory data structures complete,
    # except for the runtime information, which we'll gather next

    # Due to the way our RPC system works, exact response times cannot be
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
    # time before and after executing the request, we can at least have a time
    # window.
    nvinfo_starttime = time.time()
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    nvinfo_endtime = time.time()

    all_drbd_map = self.cfg.ComputeDRBDMap()

    feedback_fn("* Verifying node status")
    for node_i in nodeinfo:
      node = node_i.name
      nimg = node_image[node]

      if node_i.offline:
        if verbose:
          feedback_fn("* Skipping offline node %s" % (node,))
        n_offline += 1
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      elif node_i.drained:
        ntype = "drained"
        n_drained += 1
      else:
        ntype = "regular"
      if verbose:
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      msg = all_nvinfo[node].fail_msg
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
      if msg:
        nimg.rpc_fail = True
        continue

      nresult = all_nvinfo[node].payload

      nimg.call_ok = self._VerifyNode(node_i, nresult)
      self._VerifyNodeNetwork(node_i, nresult)
      self._VerifyNodeLVM(node_i, nresult, vg_name)
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
                            master_files)
      self._VerifyNodeDrbd(node_i, nresult, instanceinfo, all_drbd_map)
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)

      self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
      self._UpdateNodeInstances(node_i, nresult, nimg)
      self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)

    feedback_fn("* Verifying instance status")
    for instance in instancelist:
      if verbose:
        feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      self._VerifyInstance(instance, inst_config, node_image)
      inst_nodes_offline = []

      pnode = inst_config.primary_node
      pnode_img = node_image[pnode]
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
               self.ENODERPC, pnode, "instance %s, connection to"
               " primary node failed", instance)

      if pnode_img.offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if not inst_config.secondary_nodes:
        i_non_redundant.append(instance)
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
               instance, "instance has multiple secondary nodes: %s",
               utils.CommaJoin(inst_config.secondary_nodes),
               code=self.ETYPE_WARNING)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        s_img = node_image[snode]
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
                 "instance %s, connection to secondary node failed", instance)

        if s_img.offline:
          inst_nodes_offline.append(snode)

      # warn that the instance lives on offline nodes
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
               "instance lives on offline node(s) %s",
               utils.CommaJoin(inst_nodes_offline))
      # ... or ghost nodes
      for node in inst_config.all_nodes:
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
                 "instance lives on ghost node %s", node)

    feedback_fn("* Verifying orphan volumes")
    self._VerifyOrphanVolumes(node_vol_should, node_image)

    feedback_fn("* Verifying orphan instances")
    self._VerifyOrphanInstances(instancelist, node_image)

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      self._VerifyNPlusOneMemory(node_image, instanceinfo)

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)

    if n_drained:
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)

    return not self.bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      assert hooks_results, "invalid result from hooks"

      for node_name in hooks_results:
        res = hooks_results[node_name]
        msg = res.fail_msg
        test = msg and not res.offline
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
                      "Communication failure in hooks execution: %s", msg)
        if res.offline or msg:
          # No need to investigate payload if node is offline or gave an error.
          # override manually lu_result here as _ErrorIf only
          # overrides self.bad
          lu_result = 1
          continue
        for script, hkr, output in res.payload:
          test = hkr == constants.HKR_FAIL
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
                        "Script %s failed, output:", script)
          if test:
            # indent_re prefixes every line of the hook output with six
            # spaces so it nests under the error message above
            output = indent_re.sub('      ', output)
            feedback_fn("%s" % output)
            lu_result = 0

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    result = res_nodes, res_instances, res_missing = {}, [], {}
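    # Illustrative result shape (not from the source):
    #   ({"node2": "rpc error"},              # per-node errors
    #    ["inst1"],                           # need activate-disks
    #    {"inst2": [("node3", "xenvg/disk0")]})  # missing volumes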

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_lv_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      node_res = node_lvs[node]
      if node_res.offline:
        continue
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
        res_nodes[node] = msg
        continue

      lvs = node_res.payload
      for lv_name, (_, _, lv_online) in lvs.items():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LURepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  _OP_REQP = ["instances"]
  REQ_BGL = False

  def ExpandNames(self):
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'",
                                 errors.ECODE_INVAL)

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = _ExpandInstanceName(self.cfg, name)
        self.wanted_names.append(full_name)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsizes(node, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getsizes call to node"
                        " %s, ignoring", node)
        continue
      if len(result.data) != len(dskl):
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.data):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
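        # the node reports sizes in bytes while disk.size in the
        # configuration is in MiB, so >> 20 converts bytes to MiB
        # (our reading of the code; the original carries no comment here)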
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, disk.size))
    return changed


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    all_nodes = self.cfg.GetNodeList()
    return env, [mn], all_nodes

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.GetHostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed",
                                 errors.ECODE_INVAL)
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip, errors.ECODE_NOTUNIQUE)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        msg = to_result.fail_msg
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
          self.proc.LogWarning(msg)

    finally:
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV
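  # Example (illustrative, not from the source): a DRBD8 disk whose
  # children are two LD_LV volumes is itself reported as lvm-based,
  # since the recursion descends into the children first.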


class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    """
    for attr in ["candidate_pool_size",
                 "uid_pool", "add_uids", "remove_uids"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except (ValueError, TypeError), err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err), errors.ECODE_INVAL)
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed",
                                   errors.ECODE_INVAL)

    _CheckBooleanOpField(self.op, "maintain_node_health")

    if self.op.uid_pool:
      uidpool.CheckUidPool(self.op.uid_pool)

    if self.op.add_uids:
      uidpool.CheckUidPool(self.op.add_uids)

    if self.op.remove_uids:
      uidpool.CheckUidPool(self.op.remove_uids)

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist",
                                       errors.ECODE_INVAL)

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].fail_msg
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus), errors.ECODE_ENVIRON)

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate params changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = objects.FillDict(
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = objects.FillDict(
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
      nic_errors = []

      # check all instances for consistency
      for instance in self.cfg.GetAllInstancesInfo().values():
        for nic_idx, nic in enumerate(instance.nics):
          params_copy = copy.deepcopy(nic.nicparams)
          params_filled = objects.FillDict(self.new_nicparams, params_copy)

          # check parameter syntax
          try:
            objects.NIC.CheckParameterSyntax(params_filled)
          except errors.ConfigurationError, err:
            nic_errors.append("Instance %s, nic/%d: %s" %
                              (instance.name, nic_idx, err))

          # if we're moving instances to routed, check that they have an ip
          target_mode = params_filled[constants.NIC_MODE]
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
            nic_errors.append("Instance %s, nic/%d: routed nic with no ip" %
                              (instance.name, nic_idx))
      if nic_errors:
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
                                   "\n".join(nic_errors))

    # hypervisor list/parameters
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input",
                                   errors.ECODE_INVAL)
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    # os hypervisor parameters
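    # os_hvp is a two-level dict, e.g. (illustrative values only):
    #   {"debian-etch": {"xen-pvm": {"kernel_path": "/boot/vmlinuz"}}}
    # i.e. per-OS overrides layered on top of the per-hypervisor
    # cluster defaults.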
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
    if self.op.os_hvp:
      if not isinstance(self.op.os_hvp, dict):
        raise errors.OpPrereqError("Invalid 'os_hvp' parameter on input",
                                   errors.ECODE_INVAL)
      for os_name, hvs in self.op.os_hvp.items():
        if not isinstance(hvs, dict):
          raise errors.OpPrereqError(("Invalid 'os_hvp' parameter on"
                                      " input"), errors.ECODE_INVAL)
        if os_name not in self.new_os_hvp:
          self.new_os_hvp[os_name] = hvs
        else:
          for hv_name, hv_dict in hvs.items():
            if hv_name not in self.new_os_hvp[os_name]:
              self.new_os_hvp[os_name][hv_name] = hv_dict
            else:
              self.new_os_hvp[os_name][hv_name].update(hv_dict)

    # changes to the hypervisor list
    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      if not self.hv_list:
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                                   " least one member",
                                   errors.ECODE_INVAL)
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
      if invalid_hvs:
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                                   " entries: %s" %
                                   utils.CommaJoin(invalid_hvs),
                                   errors.ECODE_INVAL)
      for hv in self.hv_list:
        # if the hypervisor doesn't already exist in the cluster
        # hvparams, we initialize it to empty, and then (in both
        # cases) we make sure to fill the defaults, as we might not
        # have a complete defaults list if the hypervisor wasn't
        # enabled before
        if hv not in new_hvp:
          new_hvp[hv] = {}
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

    if self.op.os_hvp:
      # no need to check any newly-enabled hypervisors, since the
      # defaults have already been checked in the above code-block
      for os_name, os_hvp in self.new_os_hvp.items():
        for hv_name, hv_params in os_hvp.items():
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          # we need to fill in the new os_hvp on top of the actual hv_p
          cluster_defaults = self.new_hvparams.get(hv_name, {})
          new_osp = objects.FillDict(cluster_defaults, hv_params)
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(new_osp)
          _CheckHVParams(self, node_list, hv_name, new_osp)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.os_hvp:
      self.cluster.os_hvp = self.new_os_hvp
    if self.op.enabled_hypervisors is not None:
      self.cluster.hvparams = self.new_hvparams
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self, [])

    if self.op.maintain_node_health is not None:
      self.cluster.maintain_node_health = self.op.maintain_node_health

    if self.op.add_uids is not None:
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)

    if self.op.remove_uids is not None:
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)

    if self.op.uid_pool is not None:
      self.cluster.uid_pool = self.op.uid_pool

    self.cfg.Update(self.cluster, feedback_fn)


def _RedistributeAncillaryFiles(lu, additional_nodes=None):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to

  """
  # 1. Gather target nodes
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  dist_nodes = lu.cfg.GetOnlineNodeList()
  if additional_nodes is not None:
    dist_nodes.extend(additional_nodes)
  if myself.name in dist_nodes:
    dist_nodes.remove(myself.name)

  # 2. Gather files to distribute
  dist_files = set([constants.ETC_HOSTS,
                    constants.SSH_KNOWN_HOSTS_FILE,
                    constants.RAPI_CERT_FILE,
                    constants.RAPI_USERS_FILE,
                    constants.CONFD_HMAC_KEY,
                   ])

  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
  for hv_name in enabled_hypervisors:
    hv_class = hypervisor.GetHypervisor(hv_name)
    dist_files.update(hv_class.GetAncillaryFiles())

  # 3. Perform the files upload
  for fname in dist_files:
    if os.path.exists(fname):
      result = lu.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.items():
        msg = to_result.fail_msg
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (fname, to_node, msg))
          lu.proc.LogWarning(msg)


class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    _RedistributeAncillaryFiles(self)


def _WaitForSync(lu, instance, oneshot=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
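  # Polling scheme (as we read the loop below): RPC failures are
  # retried up to 10 times with a 6 s sleep each, while a "done but
  # degraded" status is re-checked up to degr_retries times at 1 s
  # intervals before the result is accepted.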
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, mstat.sync_percent,
                         rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
  # Fields that need calculation of global os validity
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported",
                                 errors.ECODE_INVAL)

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and tuples of (path, status, diagnose, variants) as
        values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "")]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for name, path, status, diagnose, variants in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        all_os[name][node_name].append((path, status, diagnose, variants))
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    pol = self._DiagnoseByOS(node_data)
    output = []
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
    calc_variants = "variants" in self.op.output_fields

    for os_name, os_data in pol.items():
      row = []
      if calc_valid:
        valid = True
        variants = None
        for osl in os_data.values():
          valid = valid and osl and osl[0][1]
          if not valid:
            variants = None
            break
          if calc_variants:
            node_variants = osl[0][3]
            if variants is None:
              variants = node_variants
            else:
              variants = [v for v in variants if v in node_variants]

      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = valid
        elif field == "node_status":
          # this is just a copy of the dict
          val = {}
          for node_name, nos_list in os_data.items():
            val[node_name] = nos_list
        elif field == "variants":
          val = variants
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    try:
      all_nodes.remove(self.op.node_name)
    except ValueError:
      logging.warning("Node %s which is about to be removed not found"
                      " in the all nodes list", self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    node = self.cfg.GetNodeInfo(self.op.node_name)
    assert node is not None

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.",
                                 errors.ECODE_INVAL)

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name,
                                   errors.ECODE_INVAL)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
2849
    """Removes the node from the cluster.
2850

2851
    """
2852
    node = self.node
2853
    logging.info("Stopping the node daemon and removing configs from node %s",
2854
                 node.name)
2855

    
2856
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
2857

    
2858
    # Promote nodes to master candidate as needed
2859
    _AdjustCandidatePool(self, exceptions=[node.name])
2860
    self.context.RemoveNode(node.name)
2861

    
2862
    # Run post hooks on the node before it's removed
2863
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
2864
    try:
2865
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
2866
    except:
2867
      # pylint: disable-msg=W0702
2868
      self.LogWarning("Errors occurred running hooks on %s" % node.name)
2869

    
2870
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
2871
    msg = result.fail_msg
2872
    if msg:
2873
      self.LogWarning("Errors encountered on the remote node while leaving"
2874
                      " the cluster: %s", msg)
2875

    
2876
    # Remove node from our /etc/hosts
2877
    if self.cfg.GetClusterInfo().modify_etc_hosts:
2878
      # FIXME: this should be done via an rpc call to node daemon
2879
      utils.RemoveHostFromEtcHosts(node.name)
2880
      _RedistributeAncillaryFiles(self)
2881

    
2882

    
2883
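# Illustrative sketch for LURemoveNode (not part of the upstream module;
# assumes the 2.x-era opcode and luxi client names): an LU is normally
# reached through an opcode submitted to the master daemon, roughly:
#
#   from ganeti import luxi, opcodes
#   cl = luxi.Client()
#   cl.SubmitJob([opcodes.OpRemoveNode(node_name="node3.example.com")])
#
# The (env, pre_nodes, post_nodes) triple from BuildHooksEnv leaves the node
# being removed out of both lists, so a dead node cannot block its own
# removal; Exec runs the post hook on it explicitly instead.
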
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  # pylint: disable-msg=W0142
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False

  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
                    "master_candidate", "offline", "drained"]

  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  _FIELDS_STATIC = utils.FieldSet(*[
    "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "master",
    "role"] + _SIMPLE_FIELDS
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in _GetWantedNodes if the
    # list is not empty; an empty list needs no validation
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.fail_msg and nodeinfo.payload:
          nodeinfo = nodeinfo.payload
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      inst_data = self.cfg.GetAllInstancesInfo()

      for inst in inst_data.values():
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field in self._SIMPLE_FIELDS:
          val = getattr(node, field)
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "master":
          val = node.name == master_node
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        elif field == "role":
          if node.name == master_node:
            val = "M"
          elif node.master_candidate:
            val = "C"
          elif node.drained:
            val = "D"
          elif node.offline:
            val = "O"
          else:
            val = "R"
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output


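# Illustrative sketch for LUQueryNodes (not part of the upstream module):
# query LUs return one row per object, with the columns in the order the
# caller listed them in output_fields.  A hypothetical query for
# ["name", "role", "pinst_cnt"] on a two-node cluster could yield:
#
#   [["node1.example.com", "M", 3],
#    ["node2.example.com", "C", 1]]
#
# Only requests including dynamic (live) fields trigger per-node RPCs and,
# with use_locking, node locks; purely static queries are answered from the
# configuration alone.
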
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of volumes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = nresult.payload[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


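# Illustrative sketch for LUQueryNodeVolumes (not part of the upstream
# module; the values are hypothetical): each entry in the call_node_volumes
# payload is a dict describing one logical volume, e.g.:
#
#   {"dev": "/dev/sda3", "vg": "xenvg",
#    "name": "a1b2c3-disk0", "size": "10240.00"}
#
# "size" arrives as a string in MiB, hence the int(float(...)) conversion,
# and the owning instance is resolved by reverse lookup via MapLVsByNode().
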
class LUQueryNodeStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)

  def CheckArguments(self):
    _CheckStorageType(self.op.storage_type)

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.op.name = getattr(self.op, "name", None)

    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of storage units and their attributes.

    """
    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result


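# Illustrative sketch for LUQueryNodeStorage (not part of the upstream
# module): the "node" and "type" columns are synthesized locally and never
# requested over RPC, so a hypothetical query for
# [constants.SF_NODE, constants.SF_NAME, constants.SF_FREE] sends only the
# name and free-space fields to each node and splices the node name back
# into every row.
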
class LUModifyNodeStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    _CheckStorageType(self.op.storage_type)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    """
    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Modifies the storage unit.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


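# Illustrative sketch for LUModifyNodeStorage (not part of the upstream
# module): "changes" is a dict of field -> new value, validated against
# constants.MODIFIABLE_STORAGE_FIELDS.  For LVM PV storage, for example,
# marking a volume as non-allocatable could (hypothetically) look like:
#
#   changes = {constants.SF_ALLOCATABLE: False}
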
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def CheckArguments(self):
    # validate/normalize the node name
    self.op.node_name = utils.HostInfo.NormalizeName(self.op.node_name)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.GetHostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given",
                                 errors.ECODE_INVAL)
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node, errors.ECODE_EXISTS)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
                                 errors.ECODE_NOENT)

    self.changed_primary_ip = False

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if existing_node.secondary_ip != secondary_ip:
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before",
                                     errors.ECODE_INVAL)
        if existing_node.primary_ip != primary_ip:
          self.changed_primary_ip = True

        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name,
                                   errors.ECODE_NOTUNIQUE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)

    if self.op.readd:
      self.new_node = self.cfg.GetNodeInfo(node)
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
    else:
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate
      if self.changed_primary_ip:
        new_node.primary_ip = self.op.primary_ip

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # setup ssh on node
    if self.cfg.GetClusterInfo().modify_ssh_setup:
      logging.info("Copy ssh key to node %s", node)
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      keyarray = []
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                  priv_key, pub_key]

      for i in keyfiles:
        keyarray.append(utils.ReadFile(i))

      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                      keyarray[2], keyarray[3], keyarray[4],
                                      keyarray[5])
      result.Raise("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      # FIXME: this should be done via an rpc call to node daemon
      utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
                   prereq=True, ecode=errors.ECODE_ENVIRON)
      if not result.payload:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
      self.context.AddNode(new_node, self.proc.GetECId())


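# Note on LUAddNode (not part of the upstream module): "single homed" means
# the node's secondary (replication) IP equals its primary IP, i.e. there is
# no separate replication network.  The prereq check keeps the cluster
# uniform: e.g. a master with primary 192.0.2.10 and secondary 10.0.0.10
# (dual homed) will refuse a new node that supplies no secondary_ip, because
# such a node would join single homed.
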
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    _CheckBooleanOpField(self.op, 'auto_promote')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time",
                                 errors.ECODE_INVAL)

    # Boolean value that tells us whether we're offlining or draining the node
    self.offline_or_drain = (self.op.offline == True or
                             self.op.drained == True)
    self.deoffline_or_drain = (self.op.offline == False or
                               self.op.drained == False)
    self.might_demote = (self.op.master_candidate == False or
                         self.offline_or_drain)

    self.lock_all = self.op.auto_promote and self.might_demote

  def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    else:
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested flag changes against the node's current
    state.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via masterfailover",
                                   errors.ECODE_INVAL)

    if node.master_candidate and self.might_demote and not self.lock_all:
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
      (mc_remaining, mc_should, _) = \
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
      if mc_remaining < mc_should:
        raise errors.OpPrereqError("Not enough master candidates, please"
                                   " pass auto_promote to allow promotion",
                                   errors.ECODE_INVAL)

    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name,
                                 errors.ECODE_INVAL)

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (self.deoffline_or_drain and not self.offline_or_drain and not
        self.op.master_candidate == True and not node.master_candidate):
      self.op.master_candidate = _DecideSelfPromotion(self)
      if self.op.master_candidate:
        self.LogInfo("Autopromoting node to master candidate")

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
          rrc = self.rpc.call_node_demote_from_mc(node.name)
          msg = rrc.fail_msg
          if msg:
            self.LogWarning("Node failed to demote itself: %s" % msg)
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # we locked all nodes, we adjust the CP before updating this node
    if self.lock_all:
      _AdjustCandidatePool(self, [node.name])

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)

    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result


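# Illustrative sketch for LUSetNodeParams (not part of the upstream module;
# the opcode name is assumed from the 2.x era): offline, drained and
# master_candidate can only be *set* one at a time, and Exec resolves the
# implied transitions.  A hypothetical
#
#   opcodes.OpSetNodeParams(node_name="node2.example.com", drained=True,
#                           auto_promote=True)
#
# drains the node, auto-demotes it from master candidate if needed and, via
# auto_promote/lock_all, lets _AdjustCandidatePool promote another node to
# keep the candidate pool at its configured size.
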
class LUPowercycleNode(NoHooksLU):
  """Powercycles a node.

  """
  _OP_REQP = ["node_name", "force"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This LU has no prereqs.

    """
    pass

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}

    # Filter just for enabled hypervisors
    for os_name, hv_dict in cluster.os_hvp.items():
      os_hvp[os_name] = {}
      for hv_name, hv_params in hv_dict.items():
        if hv_name in cluster.enabled_hypervisors:
          os_hvp[os_name][hv_name] = hv_params

    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.enabled_hypervisors[0],
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
      "nicparams": cluster.nicparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "file_storage_dir": cluster.file_storage_dir,
      "maintain_node_health": cluster.maintain_node_health,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      "uid_pool": cluster.uid_pool,
      }

    return result


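# Illustrative sketch for LUQueryClusterInfo (not part of the upstream
# module; names and values are hypothetical): cluster.os_hvp maps OS name ->
# hypervisor name -> parameter overrides, and the filtering above drops
# disabled hypervisors, so the returned "os_hvp" could look like:
#
#   {"debootstrap+default": {"xen-pvm": {"kernel_path": "/boot/vmlinuz"}}}
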
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
                                  "watcher_pause")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Return the requested configuration values.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      elif field == "watcher_pause":
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)
    if not hasattr(self.op, "ignore_size"):
      self.op.ignore_size = False

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
              _AssembleInstanceDisks(self, self.instance,
                                     ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: a tuple of (disks_ok, device_info); device_info is a list of
      (host, instance_visible_name, node_visible_name) tuples with the
      mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


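# Illustrative sketch for _AssembleInstanceDisks (not part of the upstream
# module; values are hypothetical): for a DRBD instance with a single disk,
# the helper first assembles the device on both nodes in secondary mode,
# then promotes it on the primary only, returning e.g.:
#
#   (True, [("node1.example.com", "disk/0", "/dev/drbd0")])
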
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                       ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are
  ignored.

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result


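# Note on the disk helpers above (not part of the upstream module):
# disk.ComputeNodeTree(primary_node) yields (node, device) pairs for every
# node holding a component of the disk -- for a DRBD8 disk, the primary and
# the secondary, each with its node-local view of the device tree -- which
# is why the assemble and shutdown helpers iterate over it rather than over
# a plain node list.
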
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or