#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd

import ganeti.masterd.instance # pylint: disable-msg=W0611


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name, errors.ECODE_INVAL)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes, an empty list (and not None) should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # The API must be kept, thus we ignore the unused-argument and
    # "could be a function" warnings
    # pylint: disable-msg=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
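    # Illustrative use from a derived LU's ExpandNames (a sketch, not a
    # specific existing LU):
    #   def ExpandNames(self):
    #     self._ExpandAndLockInstance()
    #     self.needed_locks[locking.LEVEL_NODE] = []
    #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE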

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLu.

    This just raises an error.

    """
    assert False, "BuildHooksEnv called for NoHooksLUs"


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError
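  # An LU delegating its work to tasklets typically sets, in ExpandNames
  # (illustrative sketch; "SomeTasklet" is a hypothetical subclass):
  #   self.tasklets = [SomeTasklet(self, self.op.instance_name)]
  # after which LogicalUnit.CheckPrereq and LogicalUnit.Exec iterate over it.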


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
                               errors.ECODE_INVAL)

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
  return utils.NiceSort(wanted)
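# Illustrative call (hypothetical names): _GetWantedNodes(self, ["node1"])
# expands each name via the configuration and returns the sorted result,
# e.g. ["node1.example.com"].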


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'",
                               errors.ECODE_INVAL)

  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)), errors.ECODE_INVAL)
  setattr(op, name, val)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node does not support the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _RequireFileStorage():
  """Checks that file storage is enabled.

  @raise errors.OpPrereqError: when file storage is disabled

  """
  if not constants.ENABLE_FILE_STORAGE:
    raise errors.OpPrereqError("File storage disabled at configure time",
                               errors.ECODE_INVAL)


def _CheckDiskTemplate(template):
  """Ensure a given disk template is valid.

  """
  if template not in constants.DISK_TEMPLATES:
    msg = ("Invalid disk template name '%s', valid templates are: %s" %
           (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
  if template == constants.DT_FILE:
    _RequireFileStorage()


def _CheckStorageType(storage_type):
  """Ensure a given storage type is valid.

  """
  if storage_type not in constants.VALID_STORAGE_TYPES:
    raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
                               errors.ECODE_INVAL)
  if storage_type == constants.ST_FILE:
    _RequireFileStorage()


def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instances."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
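# For a single-NIC, single-disk instance the result contains keys such as
# INSTANCE_NIC_COUNT, INSTANCE_NIC0_MAC, INSTANCE_DISK_COUNT and
# INSTANCE_DISK0_SIZE, plus one INSTANCE_BE_*/INSTANCE_HV_* entry per
# backend/hypervisor parameter (illustrative; the hooks runner later adds
# the GANETI_ prefix).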


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max by one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should
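# Worked example (hypothetical numbers): with candidate_pool_size=10 and
# GetMasterCandidateStats reporting 4 candidates now out of 4 wanted, the
# new node raises the target to min(4 + 1, 10) = 5, so 4 < 5 and the node
# decides to promote itself.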


def _CheckNicsBridgesExist(lu, target_nics, target_node,
                               profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
                for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  try:
    variant = name.split("+", 1)[1]
  except IndexError:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


class LUPostInitCluster(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def CheckPrereq(self):
    """No prerequisites to check.

    """
    return True

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUDestroyCluster(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    if modify_ssh_setup:
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      utils.CreateBackup(priv_key)
      utils.CreateBackup(pub_key)

    return master


def _VerifyCertificate(filename):
  """Verifies a certificate for LUVerifyCluster.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable-msg=W0703
    return (LUVerifyCluster.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  (errcode, msg) = \
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
                                constants.SSL_CERT_EXPIRATION_ERROR)

  if msg:
    fnamemsg = "While verifying %s: %s" % (filename, msg)
  else:
    fnamemsg = None

  if errcode is None:
    return (None, fnamemsg)
  elif errcode == utils.CERT_WARNING:
    return (LUVerifyCluster.ETYPE_WARNING, fnamemsg)
  elif errcode == utils.CERT_ERROR:
    return (LUVerifyCluster.ETYPE_ERROR, fnamemsg)

  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
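# Illustrative return values: (None, None) for a valid certificate,
# (LUVerifyCluster.ETYPE_WARNING, "While verifying ...: ...") for one close
# to expiration, and (LUVerifyCluster.ETYPE_ERROR, "...") for an expired or
# unreadable one.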


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
  REQ_BGL = False

  TCLUSTER = "cluster"
  TNODE = "node"
  TINSTANCE = "instance"

  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
  ENODEDRBD = (TNODE, "ENODEDRBD")
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
  ENODEHV = (TNODE, "ENODEHV")
  ENODELVM = (TNODE, "ENODELVM")
  ENODEN1 = (TNODE, "ENODEN1")
  ENODENET = (TNODE, "ENODENET")
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
  ENODERPC = (TNODE, "ENODERPC")
  ENODESSH = (TNODE, "ENODESSH")
  ENODEVERSION = (TNODE, "ENODEVERSION")
  ENODESETUP = (TNODE, "ENODESETUP")
  ENODETIME = (TNODE, "ENODETIME")

  ETYPE_FIELD = "code"
  ETYPE_ERROR = "ERROR"
  ETYPE_WARNING = "WARNING"

  class NodeImage(object):
    """A class representing the logical and physical status of a node.

    @ivar volumes: a structure as returned from
        L{ganeti.backend.GetVolumeList} (runtime)
    @ivar instances: a list of running instances (runtime)
    @ivar pinst: list of configured primary instances (config)
    @ivar sinst: list of configured secondary instances (config)
    @ivar sbp: dictionary of {secondary-node: list of instances} of all peers
        of this node (config)
    @ivar mfree: free memory, as reported by hypervisor (runtime)
    @ivar dfree: free disk, as reported by the node (runtime)
    @ivar offline: the offline status (config)
    @type rpc_fail: boolean
    @ivar rpc_fail: whether the RPC verify call was successful (overall,
        not whether the individual keys were correct) (runtime)
    @type lvm_fail: boolean
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
    @type hyp_fail: boolean
    @ivar hyp_fail: whether the RPC call didn't return the instance list
    @type ghost: boolean
    @ivar ghost: whether this is a known node or not (config)

    """
    def __init__(self, offline=False):
      self.volumes = {}
      self.instances = []
      self.pinst = []
      self.sinst = []
      self.sbp = {}
      self.mfree = 0
      self.dfree = 0
      self.offline = offline
      self.rpc_fail = False
      self.lvm_fail = False
      self.hyp_fail = False
      self.ghost = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def _Error(self, ecode, item, msg, *args, **kwargs):
    """Format an error message.

    Based on the opcode's error_codes parameter, either format a
    parseable error code, or a simpler error string.

    This must be called only from Exec and functions called from Exec.

    """
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
    itype, etxt = ecode
    # first complete the msg
    if args:
      msg = msg % args
    # then format the whole message
    if self.op.error_codes:
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
    else:
      if item:
        item = " " + item
      else:
        item = ""
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
    # and finally report it via the feedback_fn
    self._feedback_fn("  - %s" % msg)
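    # Illustrative output for ecode=self.ENODELVM, item="node1" (hypothetical
    # node name):
    #   with op.error_codes: "ERROR:ENODELVM:node:node1:unable to check..."
    #   without:             "ERROR: node node1: unable to check..."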

  def _ErrorIf(self, cond, *args, **kwargs):
    """Log an error message if the passed condition is True.

    """
    cond = bool(cond) or self.op.debug_simulate_errors
    if cond:
      self._Error(*args, **kwargs)
    # do not mark the operation as failed for WARN cases only
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
      self.bad = self.bad or cond

  def _VerifyNode(self, ninfo, nresult):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the results from the node
    @rtype: boolean
    @return: whether overall this call was successful (and we can expect
         reasonable values in the response)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # main result, nresult should be a non-empty dict
    test = not nresult or not isinstance(nresult, dict)
    _ErrorIf(test, self.ENODERPC, node,
                  "unable to verify node: no data returned")
    if test:
      return False

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = nresult.get("version", None)
    test = not (remote_version and
                isinstance(remote_version, (list, tuple)) and
                len(remote_version) == 2)
    _ErrorIf(test, self.ENODERPC, node,
             "connection to node returned invalid data")
    if test:
      return False

    test = local_version != remote_version[0]
    _ErrorIf(test, self.ENODEVERSION, node,
             "incompatible protocol versions: master %s,"
             " node %s", local_version, remote_version[0])
    if test:
      return False

    # node seems compatible, we can actually try to look into its results

    # full package version
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
                  self.ENODEVERSION, node,
                  "software version mismatch: master %s, node %s",
                  constants.RELEASE_VERSION, remote_version[1],
                  code=self.ETYPE_WARNING)

    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        test = hv_result is not None
        _ErrorIf(test, self.ENODEHV, node,
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)


    test = nresult.get(constants.NV_NODESETUP,
                           ["Missing NODESETUP results"])
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
             "; ".join(test))

    return True

  def _VerifyNodeTime(self, ninfo, nresult,
                      nvinfo_starttime, nvinfo_endtime):
    """Check the node time.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nvinfo_starttime: the start time of the RPC call
    @param nvinfo_endtime: the end time of the RPC call

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    ntime = nresult.get(constants.NV_TIME, None)
    try:
      ntime_merged = utils.MergeTime(ntime)
    except (ValueError, TypeError):
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
      return

    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
    else:
      ntime_diff = None

    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
             "Node time diverges by at least %s from master node time",
             ntime_diff)
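    # e.g. a node whose clock is 150 seconds ahead of the RPC window reports
    # a difference such as "150.0s" and triggers ENODETIME; skews within
    # constants.NODE_MAX_CLOCK_SKEW are tolerated.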

  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
    """Check the node LVM results.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param vg_name: the configured VG name

    """
    if vg_name is None:
      return

    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # checks vg existence and size > 20G
    vglist = nresult.get(constants.NV_VGLIST, None)
    test = not vglist
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
    if not test:
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                            constants.MIN_VG_SIZE)
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)

    # check pv names
    pvlist = nresult.get(constants.NV_PVLIST, None)
    test = pvlist is None
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
    if not test:
      # check that ':' is not present in PV names, since it's a
      # special character for lvcreate (denotes the range of PEs to
      # use on the PV)
      for _, pvname, owner_vg in pvlist:
        test = ":" in pvname
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
                 " '%s' of VG '%s'", pvname, owner_vg)

  def _VerifyNodeNetwork(self, ninfo, nresult):
    """Check the node network connectivity.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    test = constants.NV_NODELIST not in nresult
    _ErrorIf(test, self.ENODESSH, node,
             "node hasn't returned node ssh connectivity data")
    if not test:
      if nresult[constants.NV_NODELIST]:
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
          _ErrorIf(True, self.ENODESSH, node,
                   "ssh communication with node '%s': %s", a_node, a_msg)

    test = constants.NV_NODENETTEST not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node tcp connectivity data")
    if not test:
      if nresult[constants.NV_NODENETTEST]:
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
        for anode in nlist:
          _ErrorIf(True, self.ENODENET, node,
                   "tcp communication with node '%s': %s",
                   anode, nresult[constants.NV_NODENETTEST][anode])

    test = constants.NV_MASTERIP not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node master IP reachability data")
    if not test:
      if not nresult[constants.NV_MASTERIP]:
        if node == self.master_node:
          msg = "the master node cannot reach the master IP (not configured?)"
        else:
          msg = "cannot reach the master IP"
        _ErrorIf(True, self.ENODENET, node, msg)


  def _VerifyInstance(self, instance, instanceconfig, node_image):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      n_img = node_image[node]
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # ignore missing volumes on offline or broken nodes
        continue
      for volume in node_vol_should[node]:
        test = volume not in n_img.volumes
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
                 "volume %s missing on node %s", volume, node)

    if instanceconfig.admin_up:
      pri_img = node_image[node_current]
      test = instance not in pri_img.instances and not pri_img.offline
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
               "instance not running on its primary node %s",
               node_current)

    for node, n_img in node_image.items():
      if (not node == node_current):
        test = instance in n_img.instances
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
                 "instance should not run on node %s", node)

  def _VerifyOrphanVolumes(self, node_vol_should, node_image):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    for node, n_img in node_image.items():
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # skip non-healthy nodes
        continue
      for volume in n_img.volumes:
        test = (node not in node_vol_should or
                volume not in node_vol_should[node])
        self._ErrorIf(test, self.ENODEORPHANLV, node,
                      "volume %s is unknown", volume)

  def _VerifyOrphanInstances(self, instancelist, node_image):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    for node, n_img in node_image.items():
      for o_inst in n_img.instances:
        test = o_inst not in instancelist
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
                      "instance %s on node %s should not exist", o_inst, node)

  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the
    instances it was primary for.

    """
    for node, n_img in node_image.items():
      # This code checks that every node which is now listed as
      # secondary has enough memory to host all instances it is
      # supposed to, should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well
      # as up ones, considering that even if they're down someone
      # might want to start them even in the event of a node failure.
      for prinode, instances in n_img.sbp.items():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        test = n_img.mfree < needed_mem
        self._ErrorIf(test, self.ENODEN1, node,
                      "not enough memory to accommodate"
                      " failovers should peer node %s fail", prinode)

  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
                       master_files):
    """Verifies and computes the node required file checksums.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param master_files: list of files that only masters should have

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_cksum = nresult.get(constants.NV_FILELIST, None)
    test = not isinstance(remote_cksum, dict)
    _ErrorIf(test, self.ENODEFILECHECK, node,
             "node hasn't returned file checksum data")
    if test:
      return

    for file_name in file_list:
      node_is_mc = ninfo.master_candidate
      must_have = (file_name not in master_files) or node_is_mc
      # missing
      test1 = file_name not in remote_cksum
      # invalid checksum
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
      # existing and good
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
               "file '%s' missing", file_name)
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
               "file '%s' has wrong checksum", file_name)
      # not candidate and this is not a must-have file
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist on non master"
               " candidates (and the file is outdated)", file_name)
      # all good, except non-master/non-must have combination
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist"
               " on non master candidates", file_name)

  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_map):
    """Verifies the node DRBD status.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param instanceinfo: the dict of instances
    @param drbd_map: the DRBD map as returned by
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # compute the DRBD minors
    node_drbd = {}
    for minor, instance in drbd_map[node].items():
      test = instance not in instanceinfo
      _ErrorIf(test, self.ECLUSTERCFG, None,
               "ghost instance '%s' in temporary DRBD map", instance)
        # ghost instance should not be running, but otherwise we
        # don't give double warnings (both ghost instance and
        # unallocated minor in use)
      if test:
        node_drbd[minor] = (instance, False)
      else:
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)

    # and now check them
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
    test = not isinstance(used_minors, (tuple, list))
    _ErrorIf(test, self.ENODEDRBD, node,
             "cannot parse drbd status file: %s", str(used_minors))
    if test:
      # we cannot check drbd status
      return

    for minor, (iname, must_exist) in node_drbd.items():
      test = minor not in used_minors and must_exist
      _ErrorIf(test, self.ENODEDRBD, node,
               "drbd minor %d of instance %s is not active", minor, iname)
    for minor in used_minors:
      test = minor not in node_drbd
      _ErrorIf(test, self.ENODEDRBD, node,
               "unallocated drbd minor %d is in use", minor)

  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
    """Verifies and updates the node volume data.

    This function will update a L{NodeImage}'s internal structures
    with data from the remote call.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    nimg.lvm_fail = True
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
    if vg_name is None:
      pass
    elif isinstance(lvdata, basestring):
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1566
               utils.SafeEncode(lvdata))
1567
    elif not isinstance(lvdata, dict):
1568
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1569
    else:
1570
      nimg.volumes = lvdata
1571
      nimg.lvm_fail = False
1572

    
1573
  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1574
    """Verifies and updates the node instance list.
1575

1576
    If the listing was successful, then updates this node's instance
1577
    list. Otherwise, it marks the RPC call as failed for the instance
1578
    list key.
1579

1580
    @type ninfo: L{objects.Node}
1581
    @param ninfo: the node to check
1582
    @param nresult: the remote results for the node
1583
    @param nimg: the node image object
1584

1585
    """
1586
    idata = nresult.get(constants.NV_INSTANCELIST, None)
1587
    test = not isinstance(idata, list)
1588
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1589
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
1590
    if test:
1591
      nimg.hyp_fail = True
1592
    else:
1593
      nimg.instances = idata
1594

    
1595
  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1596
    """Verifies and computes a node information map
1597

1598
    @type ninfo: L{objects.Node}
1599
    @param ninfo: the node to check
1600
    @param nresult: the remote results for the node
1601
    @param nimg: the node image object
1602
    @param vg_name: the configured VG name
1603

1604
    """
1605
    node = ninfo.name
1606
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1607

    
1608
    # try to read free memory (from the hypervisor)
1609
    hv_info = nresult.get(constants.NV_HVINFO, None)
1610
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1611
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1612
    if not test:
1613
      try:
1614
        nimg.mfree = int(hv_info["memory_free"])
1615
      except (ValueError, TypeError):
1616
        _ErrorIf(True, self.ENODERPC, node,
1617
                 "node returned invalid nodeinfo, check hypervisor")
1618

    
1619
    # FIXME: devise a free space model for file based instances as well
1620
    if vg_name is not None:
1621
      test = (constants.NV_VGLIST not in nresult or
1622
              vg_name not in nresult[constants.NV_VGLIST])
1623
      _ErrorIf(test, self.ENODELVM, node,
1624
               "node didn't return data for the volume group '%s'"
1625
               " - it is either missing or broken", vg_name)
1626
      if not test:
1627
        try:
1628
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1629
        except (ValueError, TypeError):
1630
          _ErrorIf(True, self.ENODERPC, node,
1631
                   "node returned invalid LVM info, check LVM status")
1632

    
1633
  def CheckPrereq(self):
1634
    """Check prerequisites.
1635

1636
    Transform the list of checks we're going to skip into a set and check that
1637
    all its members are valid.
1638

1639
    """
1640
    self.skip_set = frozenset(self.op.skip_checks)
1641
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
1642
      raise errors.OpPrereqError("Invalid checks to be skipped specified",
1643
                                 errors.ECODE_INVAL)
1644

    
1645
  def BuildHooksEnv(self):
1646
    """Build hooks env.
1647

1648
    Cluster-Verify hooks just ran in the post phase and their failure makes
1649
    the output be logged in the verify output and the verification to fail.
1650

1651
    """
1652
    all_nodes = self.cfg.GetNodeList()
1653
    env = {
1654
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
1655
      }
1656
    for node in self.cfg.GetAllNodesInfo().values():
1657
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
1658

    
1659
    return env, [], all_nodes
1660

    
1661
  def Exec(self, feedback_fn):
1662
    """Verify integrity of cluster, performing various test on nodes.
1663

1664
    """
1665
    self.bad = False
1666
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1667
    verbose = self.op.verbose
1668
    self._feedback_fn = feedback_fn
1669
    feedback_fn("* Verifying global settings")
1670
    for msg in self.cfg.VerifyConfig():
1671
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
1672

    
1673
    # Check the cluster certificates
1674
    for cert_filename in constants.ALL_CERT_FILES:
1675
      (errcode, msg) = _VerifyCertificate(cert_filename)
1676
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
1677

    
1678
    vg_name = self.cfg.GetVGName()
1679
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
1680
    cluster = self.cfg.GetClusterInfo()
1681
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
1682
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
1683
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
1684
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
1685
                        for iname in instancelist)
1686
    i_non_redundant = [] # Non redundant instances
1687
    i_non_a_balanced = [] # Non auto-balanced instances
1688
    n_offline = 0 # Count of offline nodes
1689
    n_drained = 0 # Count of nodes being drained
1690
    node_vol_should = {}
1691

    
1692
    # FIXME: verify OS list
1693
    # do local checksums
1694
    master_files = [constants.CLUSTER_CONF_FILE]
1695
    master_node = self.master_node = self.cfg.GetMasterNode()
1696
    master_ip = self.cfg.GetMasterIP()
1697

    
1698
    file_names = ssconf.SimpleStore().GetFileList()
1699
    file_names.extend(constants.ALL_CERT_FILES)
1700
    file_names.extend(master_files)
1701
    if cluster.modify_etc_hosts:
1702
      file_names.append(constants.ETC_HOSTS)
1703

    
1704
    local_checksums = utils.FingerprintFiles(file_names)
1705

    
1706
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1707
    node_verify_param = {
1708
      constants.NV_FILELIST: file_names,
1709
      constants.NV_NODELIST: [node.name for node in nodeinfo
1710
                              if not node.offline],
1711
      constants.NV_HYPERVISOR: hypervisors,
1712
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1713
                                  node.secondary_ip) for node in nodeinfo
1714
                                 if not node.offline],
1715
      constants.NV_INSTANCELIST: hypervisors,
1716
      constants.NV_VERSION: None,
1717
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1718
      constants.NV_NODESETUP: None,
1719
      constants.NV_TIME: None,
1720
      constants.NV_MASTERIP: (master_node, master_ip),
1721
      }
1722

    
1723
    if vg_name is not None:
1724
      node_verify_param[constants.NV_VGLIST] = None
1725
      node_verify_param[constants.NV_LVLIST] = vg_name
1726
      node_verify_param[constants.NV_PVLIST] = [vg_name]
1727
      node_verify_param[constants.NV_DRBDLIST] = None
1728

    
1729
    # Build our expected cluster state
1730
    node_image = dict((node.name, self.NodeImage(offline=node.offline))
1731
                      for node in nodeinfo)
1732

    
1733
    for instance in instancelist:
1734
      inst_config = instanceinfo[instance]
1735

    
1736
      for nname in inst_config.all_nodes:
1737
        if nname not in node_image:
1738
          # ghost node
1739
          gnode = self.NodeImage()
1740
          gnode.ghost = True
1741
          node_image[nname] = gnode
1742

    
1743
      inst_config.MapLVsByNode(node_vol_should)
1744

    
1745
      pnode = inst_config.primary_node
1746
      node_image[pnode].pinst.append(instance)
1747

    
1748
      for snode in inst_config.secondary_nodes:
1749
        nimg = node_image[snode]
1750
        nimg.sinst.append(instance)
1751
        if pnode not in nimg.sbp:
1752
          nimg.sbp[pnode] = []
1753
        nimg.sbp[pnode].append(instance)
1754

    
1755
    # At this point, we have the in-memory data structures complete,
1756
    # except for the runtime information, which we'll gather next
1757

    
1758
    # Due to the way our RPC system works, exact response times cannot be
1759
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
1760
    # time before and after executing the request, we can at least have a time
1761
    # window.
1762
    nvinfo_starttime = time.time()
1763
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1764
                                           self.cfg.GetClusterName())
1765
    nvinfo_endtime = time.time()
1766

    
1767
    all_drbd_map = self.cfg.ComputeDRBDMap()
1768

    
1769
    feedback_fn("* Verifying node status")
1770
    for node_i in nodeinfo:
1771
      node = node_i.name
1772
      nimg = node_image[node]
1773

    
1774
      if node_i.offline:
1775
        if verbose:
1776
          feedback_fn("* Skipping offline node %s" % (node,))
1777
        n_offline += 1
1778
        continue
1779

    
1780
      if node == master_node:
1781
        ntype = "master"
1782
      elif node_i.master_candidate:
1783
        ntype = "master candidate"
1784
      elif node_i.drained:
1785
        ntype = "drained"
1786
        n_drained += 1
1787
      else:
1788
        ntype = "regular"
1789
      if verbose:
1790
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1791

    
1792
      msg = all_nvinfo[node].fail_msg
1793
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
1794
      if msg:
1795
        nimg.rpc_fail = True
1796
        continue
1797

    
1798
      nresult = all_nvinfo[node].payload
1799

    
1800
      nimg.call_ok = self._VerifyNode(node_i, nresult)
1801
      self._VerifyNodeNetwork(node_i, nresult)
1802
      self._VerifyNodeLVM(node_i, nresult, vg_name)
1803
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
1804
                            master_files)
1805
      self._VerifyNodeDrbd(node_i, nresult, instanceinfo, all_drbd_map)
1806
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
1807

    
1808
      self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
1809
      self._UpdateNodeInstances(node_i, nresult, nimg)
1810
      self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
1811

    
1812
    feedback_fn("* Verifying instance status")
1813
    for instance in instancelist:
1814
      if verbose:
1815
        feedback_fn("* Verifying instance %s" % instance)
1816
      inst_config = instanceinfo[instance]
1817
      self._VerifyInstance(instance, inst_config, node_image)
1818
      inst_nodes_offline = []
1819

    
1820
      pnode = inst_config.primary_node
1821
      pnode_img = node_image[pnode]
1822
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
1823
               self.ENODERPC, pnode, "instance %s, connection to"
1824
               " primary node failed", instance)
1825

    
1826
      if pnode_img.offline:
1827
        inst_nodes_offline.append(pnode)
1828

    
1829
      # If the instance is non-redundant we cannot survive losing its primary
1830
      # node, so we are not N+1 compliant. On the other hand we have no disk
1831
      # templates with more than one secondary so that situation is not well
1832
      # supported either.
1833
      # FIXME: does not support file-backed instances
1834
      if not inst_config.secondary_nodes:
1835
        i_non_redundant.append(instance)
1836
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
1837
               instance, "instance has multiple secondary nodes: %s",
1838
               utils.CommaJoin(inst_config.secondary_nodes),
1839
               code=self.ETYPE_WARNING)
1840

    
1841
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1842
        i_non_a_balanced.append(instance)
1843

    
1844
      for snode in inst_config.secondary_nodes:
1845
        s_img = node_image[snode]
1846
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
1847
                 "instance %s, connection to secondary node failed", instance)
1848

    
1849
        if s_img.offline:
1850
          inst_nodes_offline.append(snode)
1851

    
1852
      # warn that the instance lives on offline nodes
1853
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
1854
               "instance lives on offline node(s) %s",
1855
               utils.CommaJoin(inst_nodes_offline))
1856
      # ... or ghost nodes
1857
      for node in inst_config.all_nodes:
1858
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
1859
                 "instance lives on ghost node %s", node)
1860

    
1861
    feedback_fn("* Verifying orphan volumes")
1862
    self._VerifyOrphanVolumes(node_vol_should, node_image)
1863

    
1864
    feedback_fn("* Verifying orphan instances")
1865
    self._VerifyOrphanInstances(instancelist, node_image)
1866

    
1867
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1868
      feedback_fn("* Verifying N+1 Memory redundancy")
1869
      self._VerifyNPlusOneMemory(node_image, instanceinfo)
1870

    
1871
    feedback_fn("* Other Notes")
1872
    if i_non_redundant:
1873
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1874
                  % len(i_non_redundant))
1875

    
1876
    if i_non_a_balanced:
1877
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1878
                  % len(i_non_a_balanced))
1879

    
1880
    if n_offline:
1881
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
1882

    
1883
    if n_drained:
1884
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
1885

    
1886
    return not self.bad
1887

    
1888
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1889
    """Analyze the post-hooks' result
1890

1891
    This method analyses the hook result, handles it, and sends some
1892
    nicely-formatted feedback back to the user.
1893

1894
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1895
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1896
    @param hooks_results: the results of the multi-node hooks rpc call
1897
    @param feedback_fn: function used send feedback back to the caller
1898
    @param lu_result: previous Exec result
1899
    @return: the new Exec result, based on the previous result
1900
        and hook results
1901

1902
    """
1903
    # We only really run POST phase hooks, and are only interested in
1904
    # their results
1905
    if phase == constants.HOOKS_PHASE_POST:
1906
      # Used to change hooks' output to proper indentation
1907
      indent_re = re.compile('^', re.M)
1908
      feedback_fn("* Hooks Results")
1909
      assert hooks_results, "invalid result from hooks"
1910

    
1911
      for node_name in hooks_results:
1912
        res = hooks_results[node_name]
1913
        msg = res.fail_msg
1914
        test = msg and not res.offline
1915
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
1916
                      "Communication failure in hooks execution: %s", msg)
1917
        if res.offline or msg:
1918
          # No need to investigate payload if node is offline or gave an error.
1919
          # override manually lu_result here as _ErrorIf only
1920
          # overrides self.bad
1921
          lu_result = 1
1922
          continue
1923
        for script, hkr, output in res.payload:
1924
          test = hkr == constants.HKR_FAIL
1925
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
1926
                        "Script %s failed, output:", script)
1927
          if test:
1928
            output = indent_re.sub('      ', output)
1929
            feedback_fn("%s" % output)
1930
            lu_result = 0
1931

    
1932
      return lu_result
1933

    
1934

    
1935
class LUVerifyDisks(NoHooksLU):
1936
  """Verifies the cluster disks status.
1937

1938
  """
1939
  _OP_REQP = []
1940
  REQ_BGL = False
1941

    
1942
  def ExpandNames(self):
1943
    self.needed_locks = {
1944
      locking.LEVEL_NODE: locking.ALL_SET,
1945
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1946
    }
1947
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1948

    
1949
  def CheckPrereq(self):
1950
    """Check prerequisites.
1951

1952
    This has no prerequisites.
1953

1954
    """
1955
    pass
1956

    
1957
  def Exec(self, feedback_fn):
1958
    """Verify integrity of cluster disks.
1959

1960
    @rtype: tuple of three items
1961
    @return: a tuple of (dict of node-to-node_error, list of instances
1962
        which need activate-disks, dict of instance: (node, volume) for
1963
        missing volumes
1964

1965
    """
1966
    result = res_nodes, res_instances, res_missing = {}, [], {}
1967

    
1968
    vg_name = self.cfg.GetVGName()
1969
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1970
    instances = [self.cfg.GetInstanceInfo(name)
1971
                 for name in self.cfg.GetInstanceList()]
1972

    
1973
    nv_dict = {}
1974
    for inst in instances:
1975
      inst_lvs = {}
1976
      if (not inst.admin_up or
1977
          inst.disk_template not in constants.DTS_NET_MIRROR):
1978
        continue
1979
      inst.MapLVsByNode(inst_lvs)
1980
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1981
      for node, vol_list in inst_lvs.iteritems():
1982
        for vol in vol_list:
1983
          nv_dict[(node, vol)] = inst
1984

    
1985
    if not nv_dict:
1986
      return result
1987

    
1988
    node_lvs = self.rpc.call_lv_list(nodes, vg_name)
1989

    
1990
    for node in nodes:
1991
      # node_volume
1992
      node_res = node_lvs[node]
1993
      if node_res.offline:
1994
        continue
1995
      msg = node_res.fail_msg
1996
      if msg:
1997
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
1998
        res_nodes[node] = msg
1999
        continue
2000

    
2001
      lvs = node_res.payload
2002
      for lv_name, (_, _, lv_online) in lvs.items():
2003
        inst = nv_dict.pop((node, lv_name), None)
2004
        if (not lv_online and inst is not None
2005
            and inst.name not in res_instances):
2006
          res_instances.append(inst.name)
2007

    
2008
    # any leftover items in nv_dict are missing LVs, let's arrange the
2009
    # data better
2010
    for key, inst in nv_dict.iteritems():
2011
      if inst.name not in res_missing:
2012
        res_missing[inst.name] = []
2013
      res_missing[inst.name].append(key)
2014

    
2015
    return result
2016

    
2017

    
2018
class LURepairDiskSizes(NoHooksLU):
2019
  """Verifies the cluster disks sizes.
2020

2021
  """
2022
  _OP_REQP = ["instances"]
2023
  REQ_BGL = False
2024

    
2025
  def ExpandNames(self):
2026
    if not isinstance(self.op.instances, list):
2027
      raise errors.OpPrereqError("Invalid argument type 'instances'",
2028
                                 errors.ECODE_INVAL)
2029

    
2030
    if self.op.instances:
2031
      self.wanted_names = []
2032
      for name in self.op.instances:
2033
        full_name = _ExpandInstanceName(self.cfg, name)
2034
        self.wanted_names.append(full_name)
2035
      self.needed_locks = {
2036
        locking.LEVEL_NODE: [],
2037
        locking.LEVEL_INSTANCE: self.wanted_names,
2038
        }
2039
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2040
    else:
2041
      self.wanted_names = None
2042
      self.needed_locks = {
2043
        locking.LEVEL_NODE: locking.ALL_SET,
2044
        locking.LEVEL_INSTANCE: locking.ALL_SET,
2045
        }
2046
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2047

    
2048
  def DeclareLocks(self, level):
2049
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
2050
      self._LockInstancesNodes(primary_only=True)
2051

    
2052
  def CheckPrereq(self):
2053
    """Check prerequisites.
2054

2055
    This only checks the optional instance list against the existing names.
2056

2057
    """
2058
    if self.wanted_names is None:
2059
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2060

    
2061
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2062
                             in self.wanted_names]
2063

    
2064
  def _EnsureChildSizes(self, disk):
2065
    """Ensure children of the disk have the needed disk size.
2066

2067
    This is valid mainly for DRBD8 and fixes an issue where the
2068
    children have smaller disk size.
2069

2070
    @param disk: an L{ganeti.objects.Disk} object
2071

2072
    """
2073
    if disk.dev_type == constants.LD_DRBD8:
2074
      assert disk.children, "Empty children for DRBD8?"
2075
      fchild = disk.children[0]
2076
      mismatch = fchild.size < disk.size
2077
      if mismatch:
2078
        self.LogInfo("Child disk has size %d, parent %d, fixing",
2079
                     fchild.size, disk.size)
2080
        fchild.size = disk.size
2081

    
2082
      # and we recurse on this child only, not on the metadev
2083
      return self._EnsureChildSizes(fchild) or mismatch
2084
    else:
2085
      return False
2086

    
2087
  def Exec(self, feedback_fn):
2088
    """Verify the size of cluster disks.
2089

2090
    """
2091
    # TODO: check child disks too
2092
    # TODO: check differences in size between primary/secondary nodes
2093
    per_node_disks = {}
2094
    for instance in self.wanted_instances:
2095
      pnode = instance.primary_node
2096
      if pnode not in per_node_disks:
2097
        per_node_disks[pnode] = []
2098
      for idx, disk in enumerate(instance.disks):
2099
        per_node_disks[pnode].append((instance, idx, disk))
2100

    
2101
    changed = []
2102
    for node, dskl in per_node_disks.items():
2103
      newl = [v[2].Copy() for v in dskl]
2104
      for dsk in newl:
2105
        self.cfg.SetDiskID(dsk, node)
2106
      result = self.rpc.call_blockdev_getsizes(node, newl)
2107
      if result.fail_msg:
2108
        self.LogWarning("Failure in blockdev_getsizes call to node"
2109
                        " %s, ignoring", node)
2110
        continue
2111
      if len(result.data) != len(dskl):
2112
        self.LogWarning("Invalid result from node %s, ignoring node results",
2113
                        node)
2114
        continue
2115
      for ((instance, idx, disk), size) in zip(dskl, result.data):
2116
        if size is None:
2117
          self.LogWarning("Disk %d of instance %s did not return size"
2118
                          " information, ignoring", idx, instance.name)
2119
          continue
2120
        if not isinstance(size, (int, long)):
2121
          self.LogWarning("Disk %d of instance %s did not return valid"
2122
                          " size information, ignoring", idx, instance.name)
2123
          continue
2124
        size = size >> 20
2125
        if size != disk.size:
2126
          self.LogInfo("Disk %d of instance %s has mismatched size,"
2127
                       " correcting: recorded %d, actual %d", idx,
2128
                       instance.name, disk.size, size)
2129
          disk.size = size
2130
          self.cfg.Update(instance, feedback_fn)
2131
          changed.append((instance.name, idx, size))
2132
        if self._EnsureChildSizes(disk):
2133
          self.cfg.Update(instance, feedback_fn)
2134
          changed.append((instance.name, idx, disk.size))
2135
    return changed
2136

    
2137

    
2138
class LURenameCluster(LogicalUnit):
2139
  """Rename the cluster.
2140

2141
  """
2142
  HPATH = "cluster-rename"
2143
  HTYPE = constants.HTYPE_CLUSTER
2144
  _OP_REQP = ["name"]
2145

    
2146
  def BuildHooksEnv(self):
2147
    """Build hooks env.
2148

2149
    """
2150
    env = {
2151
      "OP_TARGET": self.cfg.GetClusterName(),
2152
      "NEW_NAME": self.op.name,
2153
      }
2154
    mn = self.cfg.GetMasterNode()
2155
    all_nodes = self.cfg.GetNodeList()
2156
    return env, [mn], all_nodes
2157

    
2158
  def CheckPrereq(self):
2159
    """Verify that the passed name is a valid one.
2160

2161
    """
2162
    hostname = utils.GetHostInfo(self.op.name)
2163

    
2164
    new_name = hostname.name
2165
    self.ip = new_ip = hostname.ip
2166
    old_name = self.cfg.GetClusterName()
2167
    old_ip = self.cfg.GetMasterIP()
2168
    if new_name == old_name and new_ip == old_ip:
2169
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
2170
                                 " cluster has changed",
2171
                                 errors.ECODE_INVAL)
2172
    if new_ip != old_ip:
2173
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2174
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
2175
                                   " reachable on the network. Aborting." %
2176
                                   new_ip, errors.ECODE_NOTUNIQUE)
2177

    
2178
    self.op.name = new_name
2179

    
2180
  def Exec(self, feedback_fn):
2181
    """Rename the cluster.
2182

2183
    """
2184
    clustername = self.op.name
2185
    ip = self.ip
2186

    
2187
    # shutdown the master IP
2188
    master = self.cfg.GetMasterNode()
2189
    result = self.rpc.call_node_stop_master(master, False)
2190
    result.Raise("Could not disable the master role")
2191

    
2192
    try:
2193
      cluster = self.cfg.GetClusterInfo()
2194
      cluster.cluster_name = clustername
2195
      cluster.master_ip = ip
2196
      self.cfg.Update(cluster, feedback_fn)
2197

    
2198
      # update the known hosts file
2199
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2200
      node_list = self.cfg.GetNodeList()
2201
      try:
2202
        node_list.remove(master)
2203
      except ValueError:
2204
        pass
2205
      result = self.rpc.call_upload_file(node_list,
2206
                                         constants.SSH_KNOWN_HOSTS_FILE)
2207
      for to_node, to_result in result.iteritems():
2208
        msg = to_result.fail_msg
2209
        if msg:
2210
          msg = ("Copy of file %s to node %s failed: %s" %
2211
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
2212
          self.proc.LogWarning(msg)
2213

    
2214
    finally:
2215
      result = self.rpc.call_node_start_master(master, False, False)
2216
      msg = result.fail_msg
2217
      if msg:
2218
        self.LogWarning("Could not re-enable the master role on"
2219
                        " the master, please restart manually: %s", msg)
2220

    
2221

    
2222
def _RecursiveCheckIfLVMBased(disk):
2223
  """Check if the given disk or its children are lvm-based.
2224

2225
  @type disk: L{objects.Disk}
2226
  @param disk: the disk to check
2227
  @rtype: boolean
2228
  @return: boolean indicating whether a LD_LV dev_type was found or not
2229

2230
  """
2231
  if disk.children:
2232
    for chdisk in disk.children:
2233
      if _RecursiveCheckIfLVMBased(chdisk):
2234
        return True
2235
  return disk.dev_type == constants.LD_LV
2236

    
2237

    
2238
class LUSetClusterParams(LogicalUnit):
2239
  """Change the parameters of the cluster.
2240

2241
  """
2242
  HPATH = "cluster-modify"
2243
  HTYPE = constants.HTYPE_CLUSTER
2244
  _OP_REQP = []
2245
  REQ_BGL = False
2246

    
2247
  def CheckArguments(self):
2248
    """Check parameters
2249

2250
    """
2251
    for attr in ["candidate_pool_size",
2252
                 "uid_pool", "add_uids", "remove_uids"]:
2253
      if not hasattr(self.op, attr):
2254
        setattr(self.op, attr, None)
2255

    
2256
    if self.op.candidate_pool_size is not None:
2257
      try:
2258
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
2259
      except (ValueError, TypeError), err:
2260
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
2261
                                   str(err), errors.ECODE_INVAL)
2262
      if self.op.candidate_pool_size < 1:
2263
        raise errors.OpPrereqError("At least one master candidate needed",
2264
                                   errors.ECODE_INVAL)
2265

    
2266
    _CheckBooleanOpField(self.op, "maintain_node_health")
2267

    
2268
    if self.op.uid_pool:
2269
      uidpool.CheckUidPool(self.op.uid_pool)
2270

    
2271
    if self.op.add_uids:
2272
      uidpool.CheckUidPool(self.op.add_uids)
2273

    
2274
    if self.op.remove_uids:
2275
      uidpool.CheckUidPool(self.op.remove_uids)
2276

    
2277
  def ExpandNames(self):
2278
    # FIXME: in the future maybe other cluster params won't require checking on
2279
    # all nodes to be modified.
2280
    self.needed_locks = {
2281
      locking.LEVEL_NODE: locking.ALL_SET,
2282
    }
2283
    self.share_locks[locking.LEVEL_NODE] = 1
2284

    
2285
  def BuildHooksEnv(self):
2286
    """Build hooks env.
2287

2288
    """
2289
    env = {
2290
      "OP_TARGET": self.cfg.GetClusterName(),
2291
      "NEW_VG_NAME": self.op.vg_name,
2292
      }
2293
    mn = self.cfg.GetMasterNode()
2294
    return env, [mn], [mn]
2295

    
2296
  def CheckPrereq(self):
2297
    """Check prerequisites.
2298

2299
    This checks whether the given params don't conflict and
2300
    if the given volume group is valid.
2301

2302
    """
2303
    if self.op.vg_name is not None and not self.op.vg_name:
2304
      instances = self.cfg.GetAllInstancesInfo().values()
2305
      for inst in instances:
2306
        for disk in inst.disks:
2307
          if _RecursiveCheckIfLVMBased(disk):
2308
            raise errors.OpPrereqError("Cannot disable lvm storage while"
2309
                                       " lvm-based instances exist",
2310
                                       errors.ECODE_INVAL)
2311

    
2312
    node_list = self.acquired_locks[locking.LEVEL_NODE]
2313

    
2314
    # if vg_name not None, checks given volume group on all nodes
2315
    if self.op.vg_name:
2316
      vglist = self.rpc.call_vg_list(node_list)
2317
      for node in node_list:
2318
        msg = vglist[node].fail_msg
2319
        if msg:
2320
          # ignoring down node
2321
          self.LogWarning("Error while gathering data on node %s"
2322
                          " (ignoring node): %s", node, msg)
2323
          continue
2324
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2325
                                              self.op.vg_name,
2326
                                              constants.MIN_VG_SIZE)
2327
        if vgstatus:
2328
          raise errors.OpPrereqError("Error on node '%s': %s" %
2329
                                     (node, vgstatus), errors.ECODE_ENVIRON)
2330

    
2331
    self.cluster = cluster = self.cfg.GetClusterInfo()
2332
    # validate params changes
2333
    if self.op.beparams:
2334
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2335
      self.new_beparams = objects.FillDict(
2336
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
2337

    
2338
    if self.op.nicparams:
2339
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2340
      self.new_nicparams = objects.FillDict(
2341
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
2342
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
2343
      nic_errors = []
2344

    
2345
      # check all instances for consistency
2346
      for instance in self.cfg.GetAllInstancesInfo().values():
2347
        for nic_idx, nic in enumerate(instance.nics):
2348
          params_copy = copy.deepcopy(nic.nicparams)
2349
          params_filled = objects.FillDict(self.new_nicparams, params_copy)
2350

    
2351
          # check parameter syntax
2352
          try:
2353
            objects.NIC.CheckParameterSyntax(params_filled)
2354
          except errors.ConfigurationError, err:
2355
            nic_errors.append("Instance %s, nic/%d: %s" %
2356
                              (instance.name, nic_idx, err))
2357

    
2358
          # if we're moving instances to routed, check that they have an ip
2359
          target_mode = params_filled[constants.NIC_MODE]
2360
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2361
            nic_errors.append("Instance %s, nic/%d: routed nick with no ip" %
2362
                              (instance.name, nic_idx))
2363
      if nic_errors:
2364
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2365
                                   "\n".join(nic_errors))
2366

    
2367
    # hypervisor list/parameters
2368
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
2369
    if self.op.hvparams:
2370
      if not isinstance(self.op.hvparams, dict):
2371
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input",
2372
                                   errors.ECODE_INVAL)
2373
      for hv_name, hv_dict in self.op.hvparams.items():
2374
        if hv_name not in self.new_hvparams:
2375
          self.new_hvparams[hv_name] = hv_dict
2376
        else:
2377
          self.new_hvparams[hv_name].update(hv_dict)
2378

    
2379
    # os hypervisor parameters
2380
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2381
    if self.op.os_hvp:
2382
      if not isinstance(self.op.os_hvp, dict):
2383
        raise errors.OpPrereqError("Invalid 'os_hvp' parameter on input",
2384
                                   errors.ECODE_INVAL)
2385
      for os_name, hvs in self.op.os_hvp.items():
2386
        if not isinstance(hvs, dict):
2387
          raise errors.OpPrereqError(("Invalid 'os_hvp' parameter on"
2388
                                      " input"), errors.ECODE_INVAL)
2389
        if os_name not in self.new_os_hvp:
2390
          self.new_os_hvp[os_name] = hvs
2391
        else:
2392
          for hv_name, hv_dict in hvs.items():
2393
            if hv_name not in self.new_os_hvp[os_name]:
2394
              self.new_os_hvp[os_name][hv_name] = hv_dict
2395
            else:
2396
              self.new_os_hvp[os_name][hv_name].update(hv_dict)
2397

    
2398
    # changes to the hypervisor list
2399
    if self.op.enabled_hypervisors is not None:
2400
      self.hv_list = self.op.enabled_hypervisors
2401
      if not self.hv_list:
2402
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
2403
                                   " least one member",
2404
                                   errors.ECODE_INVAL)
2405
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
2406
      if invalid_hvs:
2407
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
2408
                                   " entries: %s" %
2409
                                   utils.CommaJoin(invalid_hvs),
2410
                                   errors.ECODE_INVAL)
2411
      for hv in self.hv_list:
2412
        # if the hypervisor doesn't already exist in the cluster
2413
        # hvparams, we initialize it to empty, and then (in both
2414
        # cases) we make sure to fill the defaults, as we might not
2415
        # have a complete defaults list if the hypervisor wasn't
2416
        # enabled before
2417
        if hv not in new_hvp:
2418
          new_hvp[hv] = {}
2419
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
2420
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
2421
    else:
2422
      self.hv_list = cluster.enabled_hypervisors
2423

    
2424
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
2425
      # either the enabled list has changed, or the parameters have, validate
2426
      for hv_name, hv_params in self.new_hvparams.items():
2427
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
2428
            (self.op.enabled_hypervisors and
2429
             hv_name in self.op.enabled_hypervisors)):
2430
          # either this is a new hypervisor, or its parameters have changed
2431
          hv_class = hypervisor.GetHypervisor(hv_name)
2432
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2433
          hv_class.CheckParameterSyntax(hv_params)
2434
          _CheckHVParams(self, node_list, hv_name, hv_params)
2435

    
2436
    if self.op.os_hvp:
2437
      # no need to check any newly-enabled hypervisors, since the
2438
      # defaults have already been checked in the above code-block
2439
      for os_name, os_hvp in self.new_os_hvp.items():
2440
        for hv_name, hv_params in os_hvp.items():
2441
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2442
          # we need to fill in the new os_hvp on top of the actual hv_p
2443
          cluster_defaults = self.new_hvparams.get(hv_name, {})
2444
          new_osp = objects.FillDict(cluster_defaults, hv_params)
2445
          hv_class = hypervisor.GetHypervisor(hv_name)
2446
          hv_class.CheckParameterSyntax(new_osp)
2447
          _CheckHVParams(self, node_list, hv_name, new_osp)
2448

    
2449

    
2450
  def Exec(self, feedback_fn):
2451
    """Change the parameters of the cluster.
2452

2453
    """
2454
    if self.op.vg_name is not None:
2455
      new_volume = self.op.vg_name
2456
      if not new_volume:
2457
        new_volume = None
2458
      if new_volume != self.cfg.GetVGName():
2459
        self.cfg.SetVGName(new_volume)
2460
      else:
2461
        feedback_fn("Cluster LVM configuration already in desired"
2462
                    " state, not changing")
2463
    if self.op.hvparams:
2464
      self.cluster.hvparams = self.new_hvparams
2465
    if self.op.os_hvp:
2466
      self.cluster.os_hvp = self.new_os_hvp
2467
    if self.op.enabled_hypervisors is not None:
2468
      self.cluster.hvparams = self.new_hvparams
2469
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2470
    if self.op.beparams:
2471
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2472
    if self.op.nicparams:
2473
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2474

    
2475
    if self.op.candidate_pool_size is not None:
2476
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
2477
      # we need to update the pool size here, otherwise the save will fail
2478
      _AdjustCandidatePool(self, [])
2479

    
2480
    if self.op.maintain_node_health is not None:
2481
      self.cluster.maintain_node_health = self.op.maintain_node_health
2482

    
2483
    if self.op.add_uids is not None:
2484
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
2485

    
2486
    if self.op.remove_uids is not None:
2487
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
2488

    
2489
    if self.op.uid_pool is not None:
2490
      self.cluster.uid_pool = self.op.uid_pool
2491

    
2492
    self.cfg.Update(self.cluster, feedback_fn)
2493

    
2494

    
2495
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
2496
  """Distribute additional files which are part of the cluster configuration.
2497

2498
  ConfigWriter takes care of distributing the config and ssconf files, but
2499
  there are more files which should be distributed to all nodes. This function
2500
  makes sure those are copied.
2501

2502
  @param lu: calling logical unit
2503
  @param additional_nodes: list of nodes not in the config to distribute to
2504

2505
  """
2506
  # 1. Gather target nodes
2507
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
2508
  dist_nodes = lu.cfg.GetOnlineNodeList()
2509
  if additional_nodes is not None:
2510
    dist_nodes.extend(additional_nodes)
2511
  if myself.name in dist_nodes:
2512
    dist_nodes.remove(myself.name)
2513

    
2514
  # 2. Gather files to distribute
2515
  dist_files = set([constants.ETC_HOSTS,
2516
                    constants.SSH_KNOWN_HOSTS_FILE,
2517
                    constants.RAPI_CERT_FILE,
2518
                    constants.RAPI_USERS_FILE,
2519
                    constants.CONFD_HMAC_KEY,
2520
                    constants.CLUSTER_DOMAIN_SECRET_FILE,
2521
                   ])
2522

    
2523
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
2524
  for hv_name in enabled_hypervisors:
2525
    hv_class = hypervisor.GetHypervisor(hv_name)
2526
    dist_files.update(hv_class.GetAncillaryFiles())
2527

    
2528
  # 3. Perform the files upload
2529
  for fname in dist_files:
2530
    if os.path.exists(fname):
2531
      result = lu.rpc.call_upload_file(dist_nodes, fname)
2532
      for to_node, to_result in result.items():
2533
        msg = to_result.fail_msg
2534
        if msg:
2535
          msg = ("Copy of file %s to node %s failed: %s" %
2536
                 (fname, to_node, msg))
2537
          lu.proc.LogWarning(msg)
2538

    
2539

    
2540
class LURedistributeConfig(NoHooksLU):
2541
  """Force the redistribution of cluster configuration.
2542

2543
  This is a very simple LU.
2544

2545
  """
2546
  _OP_REQP = []
2547
  REQ_BGL = False
2548

    
2549
  def ExpandNames(self):
2550
    self.needed_locks = {
2551
      locking.LEVEL_NODE: locking.ALL_SET,
2552
    }
2553
    self.share_locks[locking.LEVEL_NODE] = 1
2554

    
2555
  def CheckPrereq(self):
2556
    """Check prerequisites.
2557

2558
    """
2559

    
2560
  def Exec(self, feedback_fn):
2561
    """Redistribute the configuration.
2562

2563
    """
2564
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
2565
    _RedistributeAncillaryFiles(self)
2566

    
2567

    
2568
def _WaitForSync(lu, instance, oneshot=False):
2569
  """Sleep and poll for an instance's disk to sync.
2570

2571
  """
2572
  if not instance.disks:
2573
    return True
2574

    
2575
  if not oneshot:
2576
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
2577

    
2578
  node = instance.primary_node
2579

    
2580
  for dev in instance.disks:
2581
    lu.cfg.SetDiskID(dev, node)
2582

    
2583
  # TODO: Convert to utils.Retry
2584

    
2585
  retries = 0
2586
  degr_retries = 10 # in seconds, as we sleep 1 second each time
2587
  while True:
2588
    max_time = 0
2589
    done = True
2590
    cumul_degraded = False
2591
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
2592
    msg = rstats.fail_msg
2593
    if msg:
2594
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
2595
      retries += 1
2596
      if retries >= 10:
2597
        raise errors.RemoteError("Can't contact node %s for mirror data,"
2598
                                 " aborting." % node)
2599
      time.sleep(6)
2600
      continue
2601
    rstats = rstats.payload
2602
    retries = 0
2603
    for i, mstat in enumerate(rstats):
2604
      if mstat is None:
2605
        lu.LogWarning("Can't compute data for node %s/%s",
2606
                           node, instance.disks[i].iv_name)
2607
        continue
2608

    
2609
      cumul_degraded = (cumul_degraded or
2610
                        (mstat.is_degraded and mstat.sync_percent is None))
2611
      if mstat.sync_percent is not None:
2612
        done = False
2613
        if mstat.estimated_time is not None:
2614
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
2615
          max_time = mstat.estimated_time
2616
        else:
2617
          rem_time = "no time estimate"
2618
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
2619
                        (instance.disks[i].iv_name, mstat.sync_percent,
2620
                         rem_time))
2621

    
2622
    # if we're done but degraded, let's do a few small retries, to
2623
    # make sure we see a stable and not transient situation; therefore
2624
    # we force restart of the loop
2625
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
2626
      logging.info("Degraded disks found, %d retries left", degr_retries)
2627
      degr_retries -= 1
2628
      time.sleep(1)
2629
      continue
2630

    
2631
    if done or oneshot:
2632
      break
2633

    
2634
    time.sleep(min(60, max_time))
2635

    
2636
  if done:
2637
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
2638
  return not cumul_degraded
2639

    
2640

    
2641
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
2642
  """Check that mirrors are not degraded.
2643

2644
  The ldisk parameter, if True, will change the test from the
2645
  is_degraded attribute (which represents overall non-ok status for
2646
  the device(s)) to the ldisk (representing the local storage status).
2647

2648
  """
2649
  lu.cfg.SetDiskID(dev, node)
2650

    
2651
  result = True
2652

    
2653
  if on_primary or dev.AssembleOnSecondary():
2654
    rstats = lu.rpc.call_blockdev_find(node, dev)
2655
    msg = rstats.fail_msg
2656
    if msg:
2657
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
2658
      result = False
2659
    elif not rstats.payload:
2660
      lu.LogWarning("Can't find disk on node %s", node)
2661
      result = False
2662
    else:
2663
      if ldisk:
2664
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
2665
      else:
2666
        result = result and not rstats.payload.is_degraded
2667

    
2668
  if dev.children:
2669
    for child in dev.children:
2670
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
2671

    
2672
  return result
2673

    
2674

    
2675
class LUDiagnoseOS(NoHooksLU):
2676
  """Logical unit for OS diagnose/query.
2677

2678
  """
2679
  _OP_REQP = ["output_fields", "names"]
2680
  REQ_BGL = False
2681
  _FIELDS_STATIC = utils.FieldSet()
2682
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
2683
  # Fields that need calculation of global os validity
2684
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])
2685

    
2686
  def ExpandNames(self):
2687
    if self.op.names:
2688
      raise errors.OpPrereqError("Selective OS query not supported",
2689
                                 errors.ECODE_INVAL)
2690

    
2691
    _CheckOutputFields(static=self._FIELDS_STATIC,
2692
                       dynamic=self._FIELDS_DYNAMIC,
2693
                       selected=self.op.output_fields)
2694

    
2695
    # Lock all nodes, in shared mode
2696
    # Temporary removal of locks, should be reverted later
2697
    # TODO: reintroduce locks when they are lighter-weight
2698
    self.needed_locks = {}
2699
    #self.share_locks[locking.LEVEL_NODE] = 1
2700
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2701

    
2702
  def CheckPrereq(self):
2703
    """Check prerequisites.
2704

2705
    """
2706

    
2707
  @staticmethod
2708
  def _DiagnoseByOS(rlist):
2709
    """Remaps a per-node return list into an a per-os per-node dictionary
2710

2711
    @param rlist: a map with node names as keys and OS objects as values
2712

2713
    @rtype: dict
2714
    @return: a dictionary with osnames as keys and as value another map, with
2715
        nodes as keys and tuples of (path, status, diagnose) as values, eg::
2716

2717
          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
2718
                                     (/srv/..., False, "invalid api")],
2719
                           "node2": [(/srv/..., True, "")]}
2720
          }
2721

2722
    """
2723
    all_os = {}
2724
    # we build here the list of nodes that didn't fail the RPC (at RPC
2725
    # level), so that nodes with a non-responding node daemon don't
2726
    # make all OSes invalid
2727
    good_nodes = [node_name for node_name in rlist
2728
                  if not rlist[node_name].fail_msg]
2729
    for node_name, nr in rlist.items():
2730
      if nr.fail_msg or not nr.payload:
2731
        continue
2732
      for name, path, status, diagnose, variants in nr.payload:
2733
        if name not in all_os:
2734
          # build a list of nodes for this os containing empty lists
2735
          # for each node in node_list
2736
          all_os[name] = {}
2737
          for nname in good_nodes:
2738
            all_os[name][nname] = []
2739
        all_os[name][node_name].append((path, status, diagnose, variants))
2740
    return all_os
2741

    
2742
  def Exec(self, feedback_fn):
2743
    """Compute the list of OSes.
2744

2745
    """
2746
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
2747
    node_data = self.rpc.call_os_diagnose(valid_nodes)
2748
    pol = self._DiagnoseByOS(node_data)
2749
    output = []
2750
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
2751
    calc_variants = "variants" in self.op.output_fields
2752

    
2753
    for os_name, os_data in pol.items():
2754
      row = []
2755
      if calc_valid:
2756
        valid = True
2757
        variants = None
2758
        for osl in os_data.values():
2759
          valid = valid and osl and osl[0][1]
2760
          if not valid:
2761
            variants = None
2762
            break
2763
          if calc_variants:
2764
            node_variants = osl[0][3]
2765
            if variants is None:
2766
              variants = node_variants
2767
            else:
2768
              variants = [v for v in variants if v in node_variants]
2769

    
2770
      for field in self.op.output_fields:
2771
        if field == "name":
2772
          val = os_name
2773
        elif field == "valid":
2774
          val = valid
2775
        elif field == "node_status":
2776
          # this is just a copy of the dict
2777
          val = {}
2778
          for node_name, nos_list in os_data.items():
2779
            val[node_name] = nos_list
2780
        elif field == "variants":
2781
          val =  variants
2782
        else:
2783
          raise errors.ParameterError(field)
2784
        row.append(val)
2785
      output.append(row)
2786

    
2787
    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    try:
      all_nodes.remove(self.op.node_name)
    except ValueError:
      logging.warning("Node %s which is about to be removed not found"
                      " in the all nodes list", self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    node = self.cfg.GetNodeInfo(self.op.node_name)
    assert node is not None

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.",
                                 errors.ECODE_INVAL)

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name,
                                   errors.ECODE_INVAL)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self, exceptions=[node.name])
    self.context.RemoveNode(node.name)

    # Run post hooks on the node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % node.name)

    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Errors encountered on the remote node while leaving"
                      " the cluster: %s", msg)

    # Remove node from our /etc/hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      # FIXME: this should be done via an rpc call to node daemon
      utils.RemoveHostFromEtcHosts(node.name)
      _RedistributeAncillaryFiles(self)
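# Illustrative note (not part of the original code): for a hypothetical
# "gnt-node remove node3.example.com", BuildHooksEnv above yields an
# environment of roughly {"OP_TARGET": "node3.example.com",
# "NODE_NAME": "node3.example.com"}, and the hooks run on every remaining
# node but never on the node being removed; the node name is made up.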


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  # pylint: disable-msg=W0142
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False

  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
                    "master_candidate", "offline", "drained"]

  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  _FIELDS_STATIC = utils.FieldSet(*[
    "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "master",
    "role"] + _SIMPLE_FIELDS
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.fail_msg and nodeinfo.payload:
          nodeinfo = nodeinfo.payload
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      inst_data = self.cfg.GetAllInstancesInfo()

      for inst in inst_data.values():
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field in self._SIMPLE_FIELDS:
          val = getattr(node, field)
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "master":
          val = node.name == master_node
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        elif field == "role":
          if node.name == master_node:
            val = "M"
          elif node.master_candidate:
            val = "C"
          elif node.drained:
            val = "D"
          elif node.offline:
            val = "O"
          else:
            val = "R"
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
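# Illustrative note (not part of the original code): a query with
# output_fields=["name", "pinst_cnt", "role"] returns one row per node,
# e.g. [["node1.example.com", 2, "M"], ["node2.example.com", 0, "C"]];
# "role" is one of M/C/D/O/R (master, candidate, drained, offline, regular).
# The node names and counts here are made-up examples.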


class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = nresult.payload[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
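# Illustrative note (not part of the original code): each returned row is a
# list of strings in output_fields order; for ["node", "name", "size",
# "instance"] a row might look like ["node1.example.com", "lv-disk0",
# "10240", "instance1.example.com"], with "-" when no instance owns the
# logical volume. The concrete names are assumptions for illustration only.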


class LUQueryNodeStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)

  def CheckArguments(self):
    _CheckStorageType(self.op.storage_type)

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.op.name = getattr(self.op, "name", None)

    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result
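# Illustrative note (not part of the original code): the RPC above is always
# issued with the storage name field included (for sorting) and without the
# node/type pseudo-fields, which only this LU can answer; each output row is
# then rebuilt per requested field, filling node and type locally and mapping
# the remaining fields back through field_idx.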


class LUModifyNodeStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    _CheckStorageType(self.op.storage_type)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    """
    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Modifies a storage volume on the given node.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))
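# Illustrative note (not part of the original code): a request with, say,
# changes={"allocatable": False} is accepted only if that key appears in
# constants.MODIFIABLE_STORAGE_FIELDS for the given storage_type; any other
# key makes CheckPrereq above fail with ECODE_INVAL. The example key is an
# assumption, not taken from this module.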


class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def CheckArguments(self):
    # validate/normalize the node name
    self.op.node_name = utils.HostInfo.NormalizeName(self.op.node_name)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.GetHostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given",
                                 errors.ECODE_INVAL)
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node, errors.ECODE_EXISTS)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
                                 errors.ECODE_NOENT)

    self.changed_primary_ip = False

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if existing_node.secondary_ip != secondary_ip:
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before",
                                     errors.ECODE_INVAL)
        if existing_node.primary_ip != primary_ip:
          self.changed_primary_ip = True

        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name,
                                   errors.ECODE_NOTUNIQUE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)

    if self.op.readd:
      self.new_node = self.cfg.GetNodeInfo(node)
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
    else:
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate
      if self.changed_primary_ip:
        new_node.primary_ip = self.op.primary_ip

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # setup ssh on node
    if self.cfg.GetClusterInfo().modify_ssh_setup:
      logging.info("Copy ssh key to node %s", node)
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      keyarray = []
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                  priv_key, pub_key]

      for i in keyfiles:
        keyarray.append(utils.ReadFile(i))

      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                      keyarray[2], keyarray[3], keyarray[4],
                                      keyarray[5])
      result.Raise("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      # FIXME: this should be done via an rpc call to node daemon
      utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
                   prereq=True, ecode=errors.ECODE_ENVIRON)
      if not result.payload:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
      self.context.AddNode(new_node, self.proc.GetECId())
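# Illustrative note (not part of the original code): the single/dual homed
# check in CheckPrereq above means that if the master was set up with
# primary_ip == secondary_ip, a new node declaring a separate secondary IP is
# rejected (and vice versa), keeping the whole cluster consistently single or
# dual homed.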


class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    _CheckBooleanOpField(self.op, 'auto_promote')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time",
                                 errors.ECODE_INVAL)

    # Boolean value that tells us whether we're offlining or draining the node
    self.offline_or_drain = (self.op.offline == True or
                             self.op.drained == True)
    self.deoffline_or_drain = (self.op.offline == False or
                               self.op.drained == False)
    self.might_demote = (self.op.master_candidate == False or
                         self.offline_or_drain)

    self.lock_all = self.op.auto_promote and self.might_demote


  def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    else:
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested flags against the current node state.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via masterfailover",
                                   errors.ECODE_INVAL)


    if node.master_candidate and self.might_demote and not self.lock_all:
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
      (mc_remaining, mc_should, _) = \
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
      if mc_remaining < mc_should:
        raise errors.OpPrereqError("Not enough master candidates, please"
                                   " pass auto_promote to allow promotion",
                                   errors.ECODE_INVAL)

    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name,
                                 errors.ECODE_INVAL)

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (self.deoffline_or_drain and not self.offline_or_drain and not
        self.op.master_candidate == True and not node.master_candidate):
      self.op.master_candidate = _DecideSelfPromotion(self)
      if self.op.master_candidate:
        self.LogInfo("Autopromoting node to master candidate")

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
          rrc = self.rpc.call_node_demote_from_mc(node.name)
          msg = rrc.fail_msg
          if msg:
            self.LogWarning("Node failed to demote itself: %s" % msg)
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # we locked all nodes, we adjust the CP before updating this node
    if self.lock_all:
      _AdjustCandidatePool(self, [node.name])

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)

    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result
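# Illustrative note (not part of the original code): offlining or draining a
# node that is currently a master candidate auto-demotes it, so Exec above
# might return [("offline", "True"), ("master_candidate", "auto-demotion due
# to offline")]; with auto_promote set, _AdjustCandidatePool may promote
# another node to keep the candidate pool size satisfied.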


class LUPowercycleNode(NoHooksLU):
  """Powercycles a node.

  """
  _OP_REQP = ["node_name", "force"]
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This LU has no prereqs.

    """
    pass

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}

    # Filter just for enabled hypervisors
    for os_name, hv_dict in cluster.os_hvp.items():
      os_hvp[os_name] = {}
      for hv_name, hv_params in hv_dict.items():
        if hv_name in cluster.enabled_hypervisors:
          os_hvp[os_name][hv_name] = hv_params

    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.enabled_hypervisors[0],
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
      "nicparams": cluster.nicparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "file_storage_dir": cluster.file_storage_dir,
      "maintain_node_health": cluster.maintain_node_health,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      "uid_pool": cluster.uid_pool,
      }

    return result


class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
                                  "watcher_pause")

  def ExpandNames(self):
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      elif field == "watcher_pause":
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values
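# Illustrative note (not part of the original code): the values come back in
# the same order as output_fields, so ["cluster_name", "drain_flag"] might
# produce ["cluster.example.com", False]; the cluster name shown here is a
# made-up example.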


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)
    if not hasattr(self.op, "ignore_size"):
      self.op.ignore_size = False

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
              _AssembleInstanceDisks(self, self.instance,
                                     ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
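# Illustrative note (not part of the original code): the disks_info returned
# here is the device_info list built by _AssembleInstanceDisks below, i.e.
# the (node, instance-visible name, node-visible path) mapping that client
# tools can display to the user after activating the disks.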


def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: a tuple of (disks_ok, device_info); disks_ok is False if the
      operation failed, and device_info is a list of
      (host, instance_visible_name, node_visible_name) tuples with the
      mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
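# Illustrative note (not part of the original code): for a DRBD-backed
# instance with a single disk this would typically return something like
# (True, [("node1.example.com", "disk/0", "/dev/drbd0")]); the node name and
# device path are made-up examples, the middle element is the disk's iv_name.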


def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored
  and do not affect the result; errors on any other node always mark
  the shutdown as failed.

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result
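# Illustrative note (not part of the original code): this shutdown is
# best-effort; callers inspect the returned boolean, and the "safe" variant
# above additionally refuses to touch the disks of an instance that is still
# marked as running (_CheckInstanceDown).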