root / lib / cmdlib.py @ f38ea602

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the master-side code."""
23

    
24
# pylint: disable-msg=W0201,C0302
25

    
26
# W0201 since most LU attributes are defined in CheckPrereq or similar
27
# functions
28

    
29
# C0302: since we have waaaay too many lines in this module
30

    
31
import os
32
import os.path
33
import time
34
import re
35
import platform
36
import logging
37
import copy
38
import OpenSSL
39
import socket
40
import tempfile
41
import shutil
42

    
43
from ganeti import ssh
44
from ganeti import utils
45
from ganeti import errors
46
from ganeti import hypervisor
47
from ganeti import locking
48
from ganeti import constants
49
from ganeti import objects
50
from ganeti import serializer
51
from ganeti import ssconf
52
from ganeti import uidpool
53
from ganeti import compat
54
from ganeti import masterd
55
from ganeti import netutils
56

    
57
import ganeti.masterd.instance # pylint: disable-msg=W0611
58

    
59

    
60
# Modifiable default values; need to define these here before the
61
# actual LUs
62

    
63
def _EmptyList():
64
  """Returns an empty list.
65

66
  """
67
  return []
68

    
69

    
70
def _EmptyDict():
71
  """Returns an empty dict.
72

73
  """
74
  return {}
75

    
76

    
77
#: The without-default default value
78
_NoDefault = object()
79

    
80

    
81
#: The no-type (value too complex to check in the type system)
82
_NoType = object()
83

    
84

    
85
# Some basic types
86
def _TNotNone(val):
87
  """Checks if the given value is not None.
88

89
  """
90
  return val is not None
91

    
92

    
93
def _TNone(val):
94
  """Checks if the given value is None.
95

96
  """
97
  return val is None
98

    
99

    
100
def _TBool(val):
101
  """Checks if the given value is a boolean.
102

103
  """
104
  return isinstance(val, bool)
105

    
106

    
107
def _TInt(val):
108
  """Checks if the given value is an integer.
109

110
  """
111
  return isinstance(val, int)
112

    
113

    
114
def _TFloat(val):
115
  """Checks if the given value is a float.
116

117
  """
118
  return isinstance(val, float)
119

    
120

    
121
def _TString(val):
122
  """Checks if the given value is a string.
123

124
  """
125
  return isinstance(val, basestring)
126

    
127

    
128
def _TTrue(val):
129
  """Checks if a given value evaluates to a boolean True value.
130

131
  """
132
  return bool(val)
133

    
134

    
135
def _TElemOf(target_list):
136
  """Builds a function that checks if a given value is a member of a list.
137

138
  """
139
  return lambda val: val in target_list
140

    
141

    
142
# Container types
143
def _TList(val):
144
  """Checks if the given value is a list.
145

146
  """
147
  return isinstance(val, list)
148

    
149

    
150
def _TDict(val):
151
  """Checks if the given value is a dictionary.
152

153
  """
154
  return isinstance(val, dict)
155

    
156

    
157
# Combinator types
158
def _TAnd(*args):
159
  """Combine multiple functions using an AND operation.
160

161
  """
162
  def fn(val):
163
    return compat.all(t(val) for t in args)
164
  return fn
165

    
166

    
167
def _TOr(*args):
168
  """Combine multiple functions using an AND operation.
169

170
  """
171
  def fn(val):
172
    return compat.any(t(val) for t in args)
173
  return fn
174

    
175

    
176
# Type aliases
177

    
178
#: a non-empty string
179
_TNonEmptyString = _TAnd(_TString, _TTrue)
180

    
181

    
182
#: a maybe non-empty string
183
_TMaybeString = _TOr(_TNonEmptyString, _TNone)
184

    
185

    
186
#: a maybe boolean (bool or none)
187
_TMaybeBool = _TOr(_TBool, _TNone)
188

    
189

    
190
#: a positive integer
191
_TPositiveInt = _TAnd(_TInt, lambda v: v >= 0)
192

    
193
#: a strictly positive integer
194
_TStrictPositiveInt = _TAnd(_TInt, lambda v: v > 0)
195

    
196

    
197
def _TListOf(my_type):
198
  """Checks if a given value is a list with all elements of the same type.
199

200
  """
201
  return _TAnd(_TList,
202
               lambda lst: compat.all(my_type(v) for v in lst))
203

    
204

    
205
def _TDictOf(key_type, val_type):
206
  """Checks a dict type for the type of its key/values.
207

208
  """
209
  return _TAnd(_TDict,
210
               lambda my_dict: (compat.all(key_type(v) for v in my_dict.keys())
211
                                and compat.all(val_type(v)
212
                                               for v in my_dict.values())))
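# Editor's note: an illustrative sketch, not part of the original module. The
# primitive checks and combinators above compose into validators for compound
# values; for example, a hypothetical map of non-empty node names to positive
# integers could be validated with:
#
#   _TDictOf(_TNonEmptyString, _TPositiveInt)({"node1.example.com": 2})   # True
#   _TDictOf(_TNonEmptyString, _TPositiveInt)({"node1.example.com": -1})  # False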
213

    
214

    
215
# Common opcode attributes
216

    
217
#: output fields for a query operation
218
_POutputFields = ("output_fields", _NoDefault, _TListOf(_TNonEmptyString))
219

    
220

    
221
#: the shutdown timeout
222
_PShutdownTimeout = ("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT,
223
                     _TPositiveInt)
224

    
225
#: the force parameter
226
_PForce = ("force", False, _TBool)
227

    
228
#: a required instance name (for single-instance LUs)
229
_PInstanceName = ("instance_name", _NoDefault, _TNonEmptyString)
230

    
231

    
232
#: a required node name (for single-node LUs)
233
_PNodeName = ("node_name", _NoDefault, _TNonEmptyString)
234

    
235
#: the migration type (live/non-live)
236
_PMigrationMode = ("mode", None, _TOr(_TNone,
237
                                      _TElemOf(constants.HT_MIGRATION_MODES)))
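# Editor's note: an illustrative sketch, not an actual LU in this module. The
# (name, default, check) triples above are meant to be listed in an LU's
# _OP_PARAMS; a hypothetical LU acting on one instance might declare:
#
#   _OP_PARAMS = [
#     _PInstanceName,
#     _PForce,
#     ("ignore_consistency", False, _TBool),
#     ]
#
# LogicalUnit.__init__ then fills in missing attributes from the default
# (second element) and validates each value with the check (third element).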
238

    
239

    
240
# End types
241
class LogicalUnit(object):
242
  """Logical Unit base class.
243

244
  Subclasses must follow these rules:
245
    - implement ExpandNames
246
    - implement CheckPrereq (except when tasklets are used)
247
    - implement Exec (except when tasklets are used)
248
    - implement BuildHooksEnv
249
    - redefine HPATH and HTYPE
250
    - optionally redefine their run requirements:
251
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
252

253
  Note that all commands require root permissions.
254

255
  @ivar dry_run_result: the value (if any) that will be returned to the caller
256
      in dry-run mode (signalled by opcode dry_run parameter)
257
  @cvar _OP_PARAMS: a list of opcode attributes, the default values
258
      they should get if not already defined, and types they must match
259

260
  """
261
  HPATH = None
262
  HTYPE = None
263
  _OP_PARAMS = []
264
  REQ_BGL = True
265

    
266
  def __init__(self, processor, op, context, rpc):
267
    """Constructor for LogicalUnit.
268

269
    This needs to be overridden in derived classes in order to check op
270
    validity.
271

272
    """
273
    self.proc = processor
274
    self.op = op
275
    self.cfg = context.cfg
276
    self.context = context
277
    self.rpc = rpc
278
    # Dicts used to declare locking needs to mcpu
279
    self.needed_locks = None
280
    self.acquired_locks = {}
281
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
282
    self.add_locks = {}
283
    self.remove_locks = {}
284
    # Used to force good behavior when calling helper functions
285
    self.recalculate_locks = {}
286
    self.__ssh = None
287
    # logging
288
    self.Log = processor.Log # pylint: disable-msg=C0103
289
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
290
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
291
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
292
    # support for dry-run
293
    self.dry_run_result = None
294
    # support for generic debug attribute
295
    if (not hasattr(self.op, "debug_level") or
296
        not isinstance(self.op.debug_level, int)):
297
      self.op.debug_level = 0
298

    
299
    # Tasklets
300
    self.tasklets = None
301

    
302
    # The new kind-of-type-system
303
    op_id = self.op.OP_ID
304
    for attr_name, aval, test in self._OP_PARAMS:
305
      if not hasattr(op, attr_name):
306
        if aval == _NoDefault:
307
          raise errors.OpPrereqError("Required parameter '%s.%s' missing" %
308
                                     (op_id, attr_name), errors.ECODE_INVAL)
309
        else:
310
          if callable(aval):
311
            dval = aval()
312
          else:
313
            dval = aval
314
          setattr(self.op, attr_name, dval)
315
      attr_val = getattr(op, attr_name)
316
      if test == _NoType:
317
        # no tests here
318
        continue
319
      if not callable(test):
320
        raise errors.ProgrammerError("Validation for parameter '%s.%s' failed,"
321
                                     " given type is not a proper type (%s)" %
322
                                     (op_id, attr_name, test))
323
      if not test(attr_val):
324
        logging.error("OpCode %s, parameter %s, has invalid type %s/value %s",
325
                      self.op.OP_ID, attr_name, type(attr_val), attr_val)
326
        raise errors.OpPrereqError("Parameter '%s.%s' fails validation" %
327
                                   (op_id, attr_name), errors.ECODE_INVAL)
328

    
329
    self.CheckArguments()
330

    
331
  def __GetSSH(self):
332
    """Returns the SshRunner object
333

334
    """
335
    if not self.__ssh:
336
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
337
    return self.__ssh
338

    
339
  ssh = property(fget=__GetSSH)
340

    
341
  def CheckArguments(self):
342
    """Check syntactic validity for the opcode arguments.
343

344
    This method is for doing a simple syntactic check and ensuring
345
    validity of opcode parameters, without any cluster-related
346
    checks. While the same can be accomplished in ExpandNames and/or
347
    CheckPrereq, doing these separately is better because:
348

349
      - ExpandNames is left as a purely lock-related function
350
      - CheckPrereq is run after we have acquired locks (and possibly
351
        waited for them)
352

353
    The function is allowed to change the self.op attribute so that
354
    later methods no longer need to worry about missing parameters.
355

356
    """
357
    pass
358

    
359
  def ExpandNames(self):
360
    """Expand names for this LU.
361

362
    This method is called before starting to execute the opcode, and it should
363
    update all the parameters of the opcode to their canonical form (e.g. a
364
    short node name must be fully expanded after this method has successfully
365
    completed). This way locking, hooks, logging, etc. can work correctly.
366

367
    LUs which implement this method must also populate the self.needed_locks
368
    member, as a dict with lock levels as keys, and a list of needed lock names
369
    as values. Rules:
370

371
      - use an empty dict if you don't need any lock
372
      - if you don't need any lock at a particular level omit that level
373
      - don't put anything for the BGL level
374
      - if you want all locks at a level use locking.ALL_SET as a value
375

376
    If you need to share locks (rather than acquire them exclusively) at one
377
    level you can modify self.share_locks, setting a true value (usually 1) for
378
    that level. By default locks are not shared.
379

380
    This function can also define a list of tasklets, which then will be
381
    executed in order instead of the usual LU-level CheckPrereq and Exec
382
    functions, if those are not defined by the LU.
383

384
    Examples::
385

386
      # Acquire all nodes and one instance
387
      self.needed_locks = {
388
        locking.LEVEL_NODE: locking.ALL_SET,
389
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
390
      }
391
      # Acquire just two nodes
392
      self.needed_locks = {
393
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
394
      }
395
      # Acquire no locks
396
      self.needed_locks = {} # No, you can't leave it to the default value None
397

398
    """
399
    # The implementation of this method is mandatory only if the new LU is
400
    # concurrent, so that old LUs don't need to be changed all at the same
401
    # time.
402
    if self.REQ_BGL:
403
      self.needed_locks = {} # Exclusive LUs don't need locks.
404
    else:
405
      raise NotImplementedError
406

    
407
  def DeclareLocks(self, level):
408
    """Declare LU locking needs for a level
409

410
    While most LUs can just declare their locking needs at ExpandNames time,
411
    sometimes there's the need to calculate some locks after having acquired
412
    the ones before. This function is called just before acquiring locks at a
413
    particular level, but after acquiring the ones at lower levels, and permits
414
    such calculations. It can be used to modify self.needed_locks, and by
415
    default it does nothing.
416

417
    This function is only called if you have something already set in
418
    self.needed_locks for the level.
419

420
    @param level: Locking level which is going to be locked
421
    @type level: member of ganeti.locking.LEVELS
422

423
    """
424

    
425
  def CheckPrereq(self):
426
    """Check prerequisites for this LU.
427

428
    This method should check that the prerequisites for the execution
429
    of this LU are fulfilled. It can do internode communication, but
430
    it should be idempotent - no cluster or system changes are
431
    allowed.
432

433
    The method should raise errors.OpPrereqError in case something is
434
    not fulfilled. Its return value is ignored.
435

436
    This method should also update all the parameters of the opcode to
437
    their canonical form if it hasn't been done by ExpandNames before.
438

439
    """
440
    if self.tasklets is not None:
441
      for (idx, tl) in enumerate(self.tasklets):
442
        logging.debug("Checking prerequisites for tasklet %s/%s",
443
                      idx + 1, len(self.tasklets))
444
        tl.CheckPrereq()
445
    else:
446
      pass
447

    
448
  def Exec(self, feedback_fn):
449
    """Execute the LU.
450

451
    This method should implement the actual work. It should raise
452
    errors.OpExecError for failures that are somewhat dealt with in
453
    code, or expected.
454

455
    """
456
    if self.tasklets is not None:
457
      for (idx, tl) in enumerate(self.tasklets):
458
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
459
        tl.Exec(feedback_fn)
460
    else:
461
      raise NotImplementedError
462

    
463
  def BuildHooksEnv(self):
464
    """Build hooks environment for this LU.
465

466
    This method should return a three-element tuple consisting of: a dict
467
    containing the environment that will be used for running the
468
    specific hook for this LU, a list of node names on which the hook
469
    should run before the execution, and a list of node names on which
470
    the hook should run after the execution.
471

472
    The keys of the dict must not have the 'GANETI_' prefix, as this will
473
    be handled in the hooks runner. Also note additional keys will be
474
    added by the hooks runner. If the LU doesn't define any
475
    environment, an empty dict (and not None) should be returned.
476

477
    If there are no nodes, an empty list (and not None) should be returned.
478

479
    Note that if the HPATH for a LU class is None, this function will
480
    not be called.
481

482
    """
483
    raise NotImplementedError
484

    
485
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
486
    """Notify the LU about the results of its hooks.
487

488
    This method is called every time a hooks phase is executed, and notifies
489
    the Logical Unit about the hooks' result. The LU can then use it to alter
490
    its result based on the hooks.  By default the method does nothing and the
491
    previous result is passed back unchanged but any LU can define it if it
492
    wants to use the local cluster hook-scripts somehow.
493

494
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
495
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
496
    @param hook_results: the results of the multi-node hooks rpc call
497
    @param feedback_fn: function used to send feedback back to the caller
498
    @param lu_result: the previous Exec result this LU had, or None
499
        in the PRE phase
500
    @return: the new Exec result, based on the previous result
501
        and hook results
502

503
    """
504
    # API must be kept, thus we ignore the unused argument and could
505
    # be a function warnings
506
    # pylint: disable-msg=W0613,R0201
507
    return lu_result
508

    
509
  def _ExpandAndLockInstance(self):
510
    """Helper function to expand and lock an instance.
511

512
    Many LUs that work on an instance take its name in self.op.instance_name
513
    and need to expand it and then declare the expanded name for locking. This
514
    function does it, and then updates self.op.instance_name to the expanded
515
    name. It also initializes needed_locks as a dict, if this hasn't been done
516
    before.
517

518
    """
519
    if self.needed_locks is None:
520
      self.needed_locks = {}
521
    else:
522
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
523
        "_ExpandAndLockInstance called with instance-level locks set"
524
    self.op.instance_name = _ExpandInstanceName(self.cfg,
525
                                                self.op.instance_name)
526
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
527

    
528
  def _LockInstancesNodes(self, primary_only=False):
529
    """Helper function to declare instances' nodes for locking.
530

531
    This function should be called after locking one or more instances to lock
532
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
533
    with all primary or secondary nodes for instances already locked and
534
    present in self.needed_locks[locking.LEVEL_INSTANCE].
535

536
    It should be called from DeclareLocks, and for safety only works if
537
    self.recalculate_locks[locking.LEVEL_NODE] is set.
538

539
    In the future it may grow parameters to just lock some instance's nodes, or
540
    to just lock primary or secondary nodes, if needed.
541

542
    It should be called in DeclareLocks in a way similar to::
543

544
      if level == locking.LEVEL_NODE:
545
        self._LockInstancesNodes()
546

547
    @type primary_only: boolean
548
    @param primary_only: only lock primary nodes of locked instances
549

550
    """
551
    assert locking.LEVEL_NODE in self.recalculate_locks, \
552
      "_LockInstancesNodes helper function called with no nodes to recalculate"
553

    
554
    # TODO: check if we've really been called with the instance locks held
555

    
556
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
557
    # future we might want to have different behaviors depending on the value
558
    # of self.recalculate_locks[locking.LEVEL_NODE]
559
    wanted_nodes = []
560
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
561
      instance = self.context.cfg.GetInstanceInfo(instance_name)
562
      wanted_nodes.append(instance.primary_node)
563
      if not primary_only:
564
        wanted_nodes.extend(instance.secondary_nodes)
565

    
566
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
567
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
568
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
569
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
570

    
571
    del self.recalculate_locks[locking.LEVEL_NODE]
572

    
573

    
574
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
575
  """Simple LU which runs no hooks.
576

577
  This LU is intended as a parent for other LogicalUnits which will
578
  run no hooks, in order to reduce duplicate code.
579

580
  """
581
  HPATH = None
582
  HTYPE = None
583

    
584
  def BuildHooksEnv(self):
585
    """Empty BuildHooksEnv for NoHooksLu.
586

587
    This just raises an error.
588

589
    """
590
    assert False, "BuildHooksEnv called for NoHooksLUs"
591

    
592

    
593
class Tasklet:
594
  """Tasklet base class.
595

596
  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
597
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
598
  tasklets know nothing about locks.
599

600
  Subclasses must follow these rules:
601
    - Implement CheckPrereq
602
    - Implement Exec
603

604
  """
605
  def __init__(self, lu):
606
    self.lu = lu
607

    
608
    # Shortcuts
609
    self.cfg = lu.cfg
610
    self.rpc = lu.rpc
611

    
612
  def CheckPrereq(self):
613
    """Check prerequisites for this tasklets.
614

615
    This method should check whether the prerequisites for the execution of
616
    this tasklet are fulfilled. It can do internode communication, but it
617
    should be idempotent - no cluster or system changes are allowed.
618

619
    The method should raise errors.OpPrereqError in case something is not
620
    fulfilled. Its return value is ignored.
621

622
    This method should also update all parameters to their canonical form if it
623
    hasn't been done before.
624

625
    """
626
    pass
627

    
628
  def Exec(self, feedback_fn):
629
    """Execute the tasklet.
630

631
    This method should implement the actual work. It should raise
632
    errors.OpExecError for failures that are somewhat dealt with in code, or
633
    expected.
634

635
    """
636
    raise NotImplementedError
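# Editor's note: an illustrative sketch, not an existing tasklet. A minimal
# tasklet only implements CheckPrereq and Exec; the owning LU stays in charge
# of locking:
#
#   class _ExampleNoopTasklet(Tasklet):
#     def CheckPrereq(self):
#       pass  # nothing to verify
#
#     def Exec(self, feedback_fn):
#       feedback_fn("example tasklet executed")
#
# An LU would set self.tasklets = [_ExampleNoopTasklet(self)] in ExpandNames;
# the base LogicalUnit.CheckPrereq and Exec then iterate over that list.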
637

    
638

    
639
def _GetWantedNodes(lu, nodes):
640
  """Returns list of checked and expanded node names.
641

642
  @type lu: L{LogicalUnit}
643
  @param lu: the logical unit on whose behalf we execute
644
  @type nodes: list
645
  @param nodes: list of node names or None for all nodes
646
  @rtype: list
647
  @return: the list of nodes, sorted
648
  @raise errors.ProgrammerError: if the nodes parameter is of a wrong type
649

650
  """
651
  if not nodes:
652
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
653
      " non-empty list of nodes whose name is to be expanded.")
654

    
655
  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
656
  return utils.NiceSort(wanted)
657

    
658

    
659
def _GetWantedInstances(lu, instances):
660
  """Returns list of checked and expanded instance names.
661

662
  @type lu: L{LogicalUnit}
663
  @param lu: the logical unit on whose behalf we execute
664
  @type instances: list
665
  @param instances: list of instance names or None for all instances
666
  @rtype: list
667
  @return: the list of instances, sorted
668
  @raise errors.OpPrereqError: if the instances parameter is of a wrong type
669
  @raise errors.OpPrereqError: if any of the passed instances is not found
670

671
  """
672
  if instances:
673
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
674
  else:
675
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
676
  return wanted
677

    
678

    
679
def _GetUpdatedParams(old_params, update_dict,
680
                      use_default=True, use_none=False):
681
  """Return the new version of a parameter dictionary.
682

683
  @type old_params: dict
684
  @param old_params: old parameters
685
  @type update_dict: dict
686
  @param update_dict: dict containing new parameter values, or
687
      constants.VALUE_DEFAULT to reset the parameter to its default
688
      value
689
  @type use_default: boolean
690
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
691
      values as 'to be deleted' values
692
  @type use_none: boolean
693
  @param use_none: whether to recognise C{None} values as 'to be
694
      deleted' values
695
  @rtype: dict
696
  @return: the new parameter dictionary
697

698
  """
699
  params_copy = copy.deepcopy(old_params)
700
  for key, val in update_dict.iteritems():
701
    if ((use_default and val == constants.VALUE_DEFAULT) or
702
        (use_none and val is None)):
703
      try:
704
        del params_copy[key]
705
      except KeyError:
706
        pass
707
    else:
708
      params_copy[key] = val
709
  return params_copy
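# Editor's note: an illustrative sketch of _GetUpdatedParams (values are
# invented for the example). With use_default=True, constants.VALUE_DEFAULT
# removes a key so that the cluster-level default applies again:
#
#   old = {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/vda1"}
#   _GetUpdatedParams(old, {"kernel_path": constants.VALUE_DEFAULT,
#                           "root_path": "/dev/vda2"})
#   --> {"root_path": "/dev/vda2"}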
710

    
711

    
712
def _CheckOutputFields(static, dynamic, selected):
713
  """Checks whether all selected fields are valid.
714

715
  @type static: L{utils.FieldSet}
716
  @param static: static fields set
717
  @type dynamic: L{utils.FieldSet}
718
  @param dynamic: dynamic fields set
719

720
  """
721
  f = utils.FieldSet()
722
  f.Extend(static)
723
  f.Extend(dynamic)
724

    
725
  delta = f.NonMatching(selected)
726
  if delta:
727
    raise errors.OpPrereqError("Unknown output fields selected: %s"
728
                               % ",".join(delta), errors.ECODE_INVAL)
729

    
730

    
731
def _CheckGlobalHvParams(params):
732
  """Validates that given hypervisor params are not global ones.
733

734
  This will ensure that instances don't get customised versions of
735
  global params.
736

737
  """
738
  used_globals = constants.HVC_GLOBALS.intersection(params)
739
  if used_globals:
740
    msg = ("The following hypervisor parameters are global and cannot"
741
           " be customized at instance level, please modify them at"
742
           " cluster level: %s" % utils.CommaJoin(used_globals))
743
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
744

    
745

    
746
def _CheckNodeOnline(lu, node):
747
  """Ensure that a given node is online.
748

749
  @param lu: the LU on behalf of which we make the check
750
  @param node: the node to check
751
  @raise errors.OpPrereqError: if the node is offline
752

753
  """
754
  if lu.cfg.GetNodeInfo(node).offline:
755
    raise errors.OpPrereqError("Can't use offline node %s" % node,
756
                               errors.ECODE_INVAL)
757

    
758

    
759
def _CheckNodeNotDrained(lu, node):
760
  """Ensure that a given node is not drained.
761

762
  @param lu: the LU on behalf of which we make the check
763
  @param node: the node to check
764
  @raise errors.OpPrereqError: if the node is drained
765

766
  """
767
  if lu.cfg.GetNodeInfo(node).drained:
768
    raise errors.OpPrereqError("Can't use drained node %s" % node,
769
                               errors.ECODE_INVAL)
770

    
771

    
772
def _CheckNodeHasOS(lu, node, os_name, force_variant):
773
  """Ensure that a node supports a given OS.
774

775
  @param lu: the LU on behalf of which we make the check
776
  @param node: the node to check
777
  @param os_name: the OS to query about
778
  @param force_variant: whether to ignore variant errors
779
  @raise errors.OpPrereqError: if the node does not support the OS
780

781
  """
782
  result = lu.rpc.call_os_get(node, os_name)
783
  result.Raise("OS '%s' not in supported OS list for node %s" %
784
               (os_name, node),
785
               prereq=True, ecode=errors.ECODE_INVAL)
786
  if not force_variant:
787
    _CheckOSVariant(result.payload, os_name)
788

    
789

    
790
def _RequireFileStorage():
791
  """Checks that file storage is enabled.
792

793
  @raise errors.OpPrereqError: when file storage is disabled
794

795
  """
796
  if not constants.ENABLE_FILE_STORAGE:
797
    raise errors.OpPrereqError("File storage disabled at configure time",
798
                               errors.ECODE_INVAL)
799

    
800

    
801
def _CheckDiskTemplate(template):
802
  """Ensure a given disk template is valid.
803

804
  """
805
  if template not in constants.DISK_TEMPLATES:
806
    msg = ("Invalid disk template name '%s', valid templates are: %s" %
807
           (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
808
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
809
  if template == constants.DT_FILE:
810
    _RequireFileStorage()
811
  return True
812

    
813

    
814
def _CheckStorageType(storage_type):
815
  """Ensure a given storage type is valid.
816

817
  """
818
  if storage_type not in constants.VALID_STORAGE_TYPES:
819
    raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
820
                               errors.ECODE_INVAL)
821
  if storage_type == constants.ST_FILE:
822
    _RequireFileStorage()
823
  return True
824

    
825

    
826
def _GetClusterDomainSecret():
827
  """Reads the cluster domain secret.
828

829
  """
830
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
831
                               strict=True)
832

    
833

    
834
def _CheckInstanceDown(lu, instance, reason):
835
  """Ensure that an instance is not running."""
836
  if instance.admin_up:
837
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
838
                               (instance.name, reason), errors.ECODE_STATE)
839

    
840
  pnode = instance.primary_node
841
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
842
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
843
              prereq=True, ecode=errors.ECODE_ENVIRON)
844

    
845
  if instance.name in ins_l.payload:
846
    raise errors.OpPrereqError("Instance %s is running, %s" %
847
                               (instance.name, reason), errors.ECODE_STATE)
848

    
849

    
850
def _ExpandItemName(fn, name, kind):
851
  """Expand an item name.
852

853
  @param fn: the function to use for expansion
854
  @param name: requested item name
855
  @param kind: text description ('Node' or 'Instance')
856
  @return: the resolved (full) name
857
  @raise errors.OpPrereqError: if the item is not found
858

859
  """
860
  full_name = fn(name)
861
  if full_name is None:
862
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
863
                               errors.ECODE_NOENT)
864
  return full_name
865

    
866

    
867
def _ExpandNodeName(cfg, name):
868
  """Wrapper over L{_ExpandItemName} for nodes."""
869
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
870

    
871

    
872
def _ExpandInstanceName(cfg, name):
873
  """Wrapper over L{_ExpandItemName} for instance."""
874
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
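# Editor's note: an illustrative sketch (names invented). These wrappers turn
# a possibly shortened name into the canonical one stored in the
# configuration, raising OpPrereqError if it is unknown:
#
#   _ExpandNodeName(lu.cfg, "node1")      --> "node1.example.com"
#   _ExpandInstanceName(lu.cfg, "ghost1") --> raises errors.OpPrereqError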
875

    
876

    
877
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
878
                          memory, vcpus, nics, disk_template, disks,
879
                          bep, hvp, hypervisor_name):
880
  """Builds instance related env variables for hooks
881

882
  This builds the hook environment from individual variables.
883

884
  @type name: string
885
  @param name: the name of the instance
886
  @type primary_node: string
887
  @param primary_node: the name of the instance's primary node
888
  @type secondary_nodes: list
889
  @param secondary_nodes: list of secondary nodes as strings
890
  @type os_type: string
891
  @param os_type: the name of the instance's OS
892
  @type status: boolean
893
  @param status: the should_run status of the instance
894
  @type memory: string
895
  @param memory: the memory size of the instance
896
  @type vcpus: string
897
  @param vcpus: the count of VCPUs the instance has
898
  @type nics: list
899
  @param nics: list of tuples (ip, mac, mode, link) representing
900
      the NICs the instance has
901
  @type disk_template: string
902
  @param disk_template: the disk template of the instance
903
  @type disks: list
904
  @param disks: the list of (size, mode) pairs
905
  @type bep: dict
906
  @param bep: the backend parameters for the instance
907
  @type hvp: dict
908
  @param hvp: the hypervisor parameters for the instance
909
  @type hypervisor_name: string
910
  @param hypervisor_name: the hypervisor for the instance
911
  @rtype: dict
912
  @return: the hook environment for this instance
913

914
  """
915
  if status:
916
    str_status = "up"
917
  else:
918
    str_status = "down"
919
  env = {
920
    "OP_TARGET": name,
921
    "INSTANCE_NAME": name,
922
    "INSTANCE_PRIMARY": primary_node,
923
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
924
    "INSTANCE_OS_TYPE": os_type,
925
    "INSTANCE_STATUS": str_status,
926
    "INSTANCE_MEMORY": memory,
927
    "INSTANCE_VCPUS": vcpus,
928
    "INSTANCE_DISK_TEMPLATE": disk_template,
929
    "INSTANCE_HYPERVISOR": hypervisor_name,
930
  }
931

    
932
  if nics:
933
    nic_count = len(nics)
934
    for idx, (ip, mac, mode, link) in enumerate(nics):
935
      if ip is None:
936
        ip = ""
937
      env["INSTANCE_NIC%d_IP" % idx] = ip
938
      env["INSTANCE_NIC%d_MAC" % idx] = mac
939
      env["INSTANCE_NIC%d_MODE" % idx] = mode
940
      env["INSTANCE_NIC%d_LINK" % idx] = link
941
      if mode == constants.NIC_MODE_BRIDGED:
942
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
943
  else:
944
    nic_count = 0
945

    
946
  env["INSTANCE_NIC_COUNT"] = nic_count
947

    
948
  if disks:
949
    disk_count = len(disks)
950
    for idx, (size, mode) in enumerate(disks):
951
      env["INSTANCE_DISK%d_SIZE" % idx] = size
952
      env["INSTANCE_DISK%d_MODE" % idx] = mode
953
  else:
954
    disk_count = 0
955

    
956
  env["INSTANCE_DISK_COUNT"] = disk_count
957

    
958
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
959
    for key, value in source.items():
960
      env["INSTANCE_%s_%s" % (kind, key)] = value
961

    
962
  return env
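# Editor's note: an illustrative sketch of the resulting hook environment for
# a hypothetical single-NIC, single-disk instance (all values invented):
#
#   {
#     "OP_TARGET": "inst1.example.com",
#     "INSTANCE_NAME": "inst1.example.com",
#     "INSTANCE_PRIMARY": "node1.example.com",
#     "INSTANCE_SECONDARIES": "",
#     "INSTANCE_OS_TYPE": "debootstrap",
#     "INSTANCE_STATUS": "up",
#     "INSTANCE_MEMORY": 128,
#     "INSTANCE_VCPUS": 1,
#     "INSTANCE_DISK_TEMPLATE": "plain",
#     "INSTANCE_HYPERVISOR": "xen-pvm",
#     "INSTANCE_NIC_COUNT": 1,
#     "INSTANCE_NIC0_IP": "",
#     "INSTANCE_NIC0_MAC": "aa:00:00:00:00:01",
#     "INSTANCE_NIC0_MODE": "bridged",
#     "INSTANCE_NIC0_LINK": "xen-br0",
#     "INSTANCE_NIC0_BRIDGE": "xen-br0",
#     "INSTANCE_DISK_COUNT": 1,
#     "INSTANCE_DISK0_SIZE": 1024,
#     "INSTANCE_DISK0_MODE": "rw",
#   }
#
# INSTANCE_BE_* and INSTANCE_HV_* keys are added from the bep/hvp dicts in
# the same way.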
963

    
964

    
965
def _NICListToTuple(lu, nics):
966
  """Build a list of nic information tuples.
967

968
  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
969
  value in LUQueryInstanceData.
970

971
  @type lu:  L{LogicalUnit}
972
  @param lu: the logical unit on whose behalf we execute
973
  @type nics: list of L{objects.NIC}
974
  @param nics: list of nics to convert to hooks tuples
975

976
  """
977
  hooks_nics = []
978
  cluster = lu.cfg.GetClusterInfo()
979
  for nic in nics:
980
    ip = nic.ip
981
    mac = nic.mac
982
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
983
    mode = filled_params[constants.NIC_MODE]
984
    link = filled_params[constants.NIC_LINK]
985
    hooks_nics.append((ip, mac, mode, link))
986
  return hooks_nics
987

    
988

    
989
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
990
  """Builds instance related env variables for hooks from an object.
991

992
  @type lu: L{LogicalUnit}
993
  @param lu: the logical unit on whose behalf we execute
994
  @type instance: L{objects.Instance}
995
  @param instance: the instance for which we should build the
996
      environment
997
  @type override: dict
998
  @param override: dictionary with key/values that will override
999
      our values
1000
  @rtype: dict
1001
  @return: the hook environment dictionary
1002

1003
  """
1004
  cluster = lu.cfg.GetClusterInfo()
1005
  bep = cluster.FillBE(instance)
1006
  hvp = cluster.FillHV(instance)
1007
  args = {
1008
    'name': instance.name,
1009
    'primary_node': instance.primary_node,
1010
    'secondary_nodes': instance.secondary_nodes,
1011
    'os_type': instance.os,
1012
    'status': instance.admin_up,
1013
    'memory': bep[constants.BE_MEMORY],
1014
    'vcpus': bep[constants.BE_VCPUS],
1015
    'nics': _NICListToTuple(lu, instance.nics),
1016
    'disk_template': instance.disk_template,
1017
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
1018
    'bep': bep,
1019
    'hvp': hvp,
1020
    'hypervisor_name': instance.hypervisor,
1021
  }
1022
  if override:
1023
    args.update(override)
1024
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
1025

    
1026

    
1027
def _AdjustCandidatePool(lu, exceptions):
1028
  """Adjust the candidate pool after node operations.
1029

1030
  """
1031
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
1032
  if mod_list:
1033
    lu.LogInfo("Promoted nodes to master candidate role: %s",
1034
               utils.CommaJoin(node.name for node in mod_list))
1035
    for name in mod_list:
1036
      lu.context.ReaddNode(name)
1037
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1038
  if mc_now > mc_max:
1039
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
1040
               (mc_now, mc_max))
1041

    
1042

    
1043
def _DecideSelfPromotion(lu, exceptions=None):
1044
  """Decide whether I should promote myself as a master candidate.
1045

1046
  """
1047
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
1048
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1049
  # the new node will increase mc_max with one, so:
1050
  mc_should = min(mc_should + 1, cp_size)
1051
  return mc_now < mc_should
1052

    
1053

    
1054
def _CheckNicsBridgesExist(lu, target_nics, target_node):
1055
  """Check that the brigdes needed by a list of nics exist.
1056

1057
  """
1058
  cluster = lu.cfg.GetClusterInfo()
1059
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
1060
  brlist = [params[constants.NIC_LINK] for params in paramslist
1061
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
1062
  if brlist:
1063
    result = lu.rpc.call_bridges_exist(target_node, brlist)
1064
    result.Raise("Error checking bridges on destination node '%s'" %
1065
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
1066

    
1067

    
1068
def _CheckInstanceBridgesExist(lu, instance, node=None):
1069
  """Check that the brigdes needed by an instance exist.
1070

1071
  """
1072
  if node is None:
1073
    node = instance.primary_node
1074
  _CheckNicsBridgesExist(lu, instance.nics, node)
1075

    
1076

    
1077
def _CheckOSVariant(os_obj, name):
1078
  """Check whether an OS name conforms to the os variants specification.
1079

1080
  @type os_obj: L{objects.OS}
1081
  @param os_obj: OS object to check
1082
  @type name: string
1083
  @param name: OS name passed by the user, to check for validity
1084

1085
  """
1086
  if not os_obj.supported_variants:
1087
    return
1088
  try:
1089
    variant = name.split("+", 1)[1]
1090
  except IndexError:
1091
    raise errors.OpPrereqError("OS name must include a variant",
1092
                               errors.ECODE_INVAL)
1093

    
1094
  if variant not in os_obj.supported_variants:
1095
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
1096

    
1097

    
1098
def _GetNodeInstancesInner(cfg, fn):
1099
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1100

    
1101

    
1102
def _GetNodeInstances(cfg, node_name):
1103
  """Returns a list of all primary and secondary instances on a node.
1104

1105
  """
1106

    
1107
  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1108

    
1109

    
1110
def _GetNodePrimaryInstances(cfg, node_name):
1111
  """Returns primary instances on a node.
1112

1113
  """
1114
  return _GetNodeInstancesInner(cfg,
1115
                                lambda inst: node_name == inst.primary_node)
1116

    
1117

    
1118
def _GetNodeSecondaryInstances(cfg, node_name):
1119
  """Returns secondary instances on a node.
1120

1121
  """
1122
  return _GetNodeInstancesInner(cfg,
1123
                                lambda inst: node_name in inst.secondary_nodes)
1124

    
1125

    
1126
def _GetStorageTypeArgs(cfg, storage_type):
1127
  """Returns the arguments for a storage type.
1128

1129
  """
1130
  # Special case for file storage
1131
  if storage_type == constants.ST_FILE:
1132
    # storage.FileStorage wants a list of storage directories
1133
    return [[cfg.GetFileStorageDir()]]
1134

    
1135
  return []
1136

    
1137

    
1138
def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
1139
  faulty = []
1140

    
1141
  for dev in instance.disks:
1142
    cfg.SetDiskID(dev, node_name)
1143

    
1144
  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
1145
  result.Raise("Failed to get disk status from node %s" % node_name,
1146
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
1147

    
1148
  for idx, bdev_status in enumerate(result.payload):
1149
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1150
      faulty.append(idx)
1151

    
1152
  return faulty
1153

    
1154

    
1155
def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1156
  """Check the sanity of iallocator and node arguments and use the
1157
  cluster-wide iallocator if appropriate.
1158

1159
  Check that at most one of (iallocator, node) is specified. If none is
1160
  specified, then the LU's opcode's iallocator slot is filled with the
1161
  cluster-wide default iallocator.
1162

1163
  @type iallocator_slot: string
1164
  @param iallocator_slot: the name of the opcode iallocator slot
1165
  @type node_slot: string
1166
  @param node_slot: the name of the opcode target node slot
1167

1168
  """
1169
  node = getattr(lu.op, node_slot, None)
1170
  iallocator = getattr(lu.op, iallocator_slot, None)
1171

    
1172
  if node is not None and iallocator is not None:
1173
    raise errors.OpPrereqError("Do not specify both, iallocator and node.",
1174
                               errors.ECODE_INVAL)
1175
  elif node is None and iallocator is None:
1176
    default_iallocator = lu.cfg.GetDefaultIAllocator()
1177
    if default_iallocator:
1178
      setattr(lu.op, iallocator_slot, default_iallocator)
1179
    else:
1180
      raise errors.OpPrereqError("No iallocator or node given and no"
1181
                                 " cluster-wide default iallocator found."
1182
                                 " Please specify either an iallocator or a"
1183
                                 " node, or set a cluster-wide default"
1184
                                 " iallocator.")
1185

    
1186

    
1187
class LUPostInitCluster(LogicalUnit):
1188
  """Logical unit for running hooks after cluster initialization.
1189

1190
  """
1191
  HPATH = "cluster-init"
1192
  HTYPE = constants.HTYPE_CLUSTER
1193

    
1194
  def BuildHooksEnv(self):
1195
    """Build hooks env.
1196

1197
    """
1198
    env = {"OP_TARGET": self.cfg.GetClusterName()}
1199
    mn = self.cfg.GetMasterNode()
1200
    return env, [], [mn]
1201

    
1202
  def Exec(self, feedback_fn):
1203
    """Nothing to do.
1204

1205
    """
1206
    return True
1207

    
1208

    
1209
class LUDestroyCluster(LogicalUnit):
1210
  """Logical unit for destroying the cluster.
1211

1212
  """
1213
  HPATH = "cluster-destroy"
1214
  HTYPE = constants.HTYPE_CLUSTER
1215

    
1216
  def BuildHooksEnv(self):
1217
    """Build hooks env.
1218

1219
    """
1220
    env = {"OP_TARGET": self.cfg.GetClusterName()}
1221
    return env, [], []
1222

    
1223
  def CheckPrereq(self):
1224
    """Check prerequisites.
1225

1226
    This checks whether the cluster is empty.
1227

1228
    Any errors are signaled by raising errors.OpPrereqError.
1229

1230
    """
1231
    master = self.cfg.GetMasterNode()
1232

    
1233
    nodelist = self.cfg.GetNodeList()
1234
    if len(nodelist) != 1 or nodelist[0] != master:
1235
      raise errors.OpPrereqError("There are still %d node(s) in"
1236
                                 " this cluster." % (len(nodelist) - 1),
1237
                                 errors.ECODE_INVAL)
1238
    instancelist = self.cfg.GetInstanceList()
1239
    if instancelist:
1240
      raise errors.OpPrereqError("There are still %d instance(s) in"
1241
                                 " this cluster." % len(instancelist),
1242
                                 errors.ECODE_INVAL)
1243

    
1244
  def Exec(self, feedback_fn):
1245
    """Destroys the cluster.
1246

1247
    """
1248
    master = self.cfg.GetMasterNode()
1249
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
1250

    
1251
    # Run post hooks on master node before it's removed
1252
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
1253
    try:
1254
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
1255
    except:
1256
      # pylint: disable-msg=W0702
1257
      self.LogWarning("Errors occurred running hooks on %s" % master)
1258

    
1259
    result = self.rpc.call_node_stop_master(master, False)
1260
    result.Raise("Could not disable the master role")
1261

    
1262
    if modify_ssh_setup:
1263
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1264
      utils.CreateBackup(priv_key)
1265
      utils.CreateBackup(pub_key)
1266

    
1267
    return master
1268

    
1269

    
1270
def _VerifyCertificate(filename):
1271
  """Verifies a certificate for LUVerifyCluster.
1272

1273
  @type filename: string
1274
  @param filename: Path to PEM file
1275

1276
  """
1277
  try:
1278
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1279
                                           utils.ReadFile(filename))
1280
  except Exception, err: # pylint: disable-msg=W0703
1281
    return (LUVerifyCluster.ETYPE_ERROR,
1282
            "Failed to load X509 certificate %s: %s" % (filename, err))
1283

    
1284
  (errcode, msg) = \
1285
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1286
                                constants.SSL_CERT_EXPIRATION_ERROR)
1287

    
1288
  if msg:
1289
    fnamemsg = "While verifying %s: %s" % (filename, msg)
1290
  else:
1291
    fnamemsg = None
1292

    
1293
  if errcode is None:
1294
    return (None, fnamemsg)
1295
  elif errcode == utils.CERT_WARNING:
1296
    return (LUVerifyCluster.ETYPE_WARNING, fnamemsg)
1297
  elif errcode == utils.CERT_ERROR:
1298
    return (LUVerifyCluster.ETYPE_ERROR, fnamemsg)
1299

    
1300
  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1301

    
1302

    
1303
class LUVerifyCluster(LogicalUnit):
1304
  """Verifies the cluster status.
1305

1306
  """
1307
  HPATH = "cluster-verify"
1308
  HTYPE = constants.HTYPE_CLUSTER
1309
  _OP_PARAMS = [
1310
    ("skip_checks", _EmptyList,
1311
     _TListOf(_TElemOf(constants.VERIFY_OPTIONAL_CHECKS))),
1312
    ("verbose", False, _TBool),
1313
    ("error_codes", False, _TBool),
1314
    ("debug_simulate_errors", False, _TBool),
1315
    ]
1316
  REQ_BGL = False
1317

    
1318
  TCLUSTER = "cluster"
1319
  TNODE = "node"
1320
  TINSTANCE = "instance"
1321

    
1322
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1323
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1324
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1325
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1326
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1327
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1329
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1330
  ENODEDRBD = (TNODE, "ENODEDRBD")
1331
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1332
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1333
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
1334
  ENODEHV = (TNODE, "ENODEHV")
1335
  ENODELVM = (TNODE, "ENODELVM")
1336
  ENODEN1 = (TNODE, "ENODEN1")
1337
  ENODENET = (TNODE, "ENODENET")
1338
  ENODEOS = (TNODE, "ENODEOS")
1339
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1340
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1341
  ENODERPC = (TNODE, "ENODERPC")
1342
  ENODESSH = (TNODE, "ENODESSH")
1343
  ENODEVERSION = (TNODE, "ENODEVERSION")
1344
  ENODESETUP = (TNODE, "ENODESETUP")
1345
  ENODETIME = (TNODE, "ENODETIME")
1346

    
1347
  ETYPE_FIELD = "code"
1348
  ETYPE_ERROR = "ERROR"
1349
  ETYPE_WARNING = "WARNING"
1350

    
1351
  class NodeImage(object):
1352
    """A class representing the logical and physical status of a node.
1353

1354
    @type name: string
1355
    @ivar name: the node name to which this object refers
1356
    @ivar volumes: a structure as returned from
1357
        L{ganeti.backend.GetVolumeList} (runtime)
1358
    @ivar instances: a list of running instances (runtime)
1359
    @ivar pinst: list of configured primary instances (config)
1360
    @ivar sinst: list of configured secondary instances (config)
1361
    @ivar sbp: dictionary of {secondary-node: list of instances} of all peers
1362
        of this node (config)
1363
    @ivar mfree: free memory, as reported by hypervisor (runtime)
1364
    @ivar dfree: free disk, as reported by the node (runtime)
1365
    @ivar offline: the offline status (config)
1366
    @type rpc_fail: boolean
1367
    @ivar rpc_fail: whether the RPC verify call was successful (overall,
1368
        not whether the individual keys were correct) (runtime)
1369
    @type lvm_fail: boolean
1370
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1371
    @type hyp_fail: boolean
1372
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1373
    @type ghost: boolean
1374
    @ivar ghost: whether this is a known node or not (config)
1375
    @type os_fail: boolean
1376
    @ivar os_fail: whether the RPC call didn't return valid OS data
1377
    @type oslist: list
1378
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1379

1380
    """
1381
    def __init__(self, offline=False, name=None):
1382
      self.name = name
1383
      self.volumes = {}
1384
      self.instances = []
1385
      self.pinst = []
1386
      self.sinst = []
1387
      self.sbp = {}
1388
      self.mfree = 0
1389
      self.dfree = 0
1390
      self.offline = offline
1391
      self.rpc_fail = False
1392
      self.lvm_fail = False
1393
      self.hyp_fail = False
1394
      self.ghost = False
1395
      self.os_fail = False
1396
      self.oslist = {}
1397

    
1398
  def ExpandNames(self):
1399
    self.needed_locks = {
1400
      locking.LEVEL_NODE: locking.ALL_SET,
1401
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1402
    }
1403
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1404

    
1405
  def _Error(self, ecode, item, msg, *args, **kwargs):
1406
    """Format an error message.
1407

1408
    Based on the opcode's error_codes parameter, either format a
1409
    parseable error code, or a simpler error string.
1410

1411
    This must be called only from Exec and functions called from Exec.
1412

1413
    """
1414
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1415
    itype, etxt = ecode
1416
    # first complete the msg
1417
    if args:
1418
      msg = msg % args
1419
    # then format the whole message
1420
    if self.op.error_codes:
1421
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1422
    else:
1423
      if item:
1424
        item = " " + item
1425
      else:
1426
        item = ""
1427
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1428
    # and finally report it via the feedback_fn
1429
    self._feedback_fn("  - %s" % msg)
1430

    
1431
  def _ErrorIf(self, cond, *args, **kwargs):
1432
    """Log an error message if the passed condition is True.
1433

1434
    """
1435
    cond = bool(cond) or self.op.debug_simulate_errors
1436
    if cond:
1437
      self._Error(*args, **kwargs)
1438
    # do not mark the operation as failed for WARN cases only
1439
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1440
      self.bad = self.bad or cond
1441

    
1442
  def _VerifyNode(self, ninfo, nresult):
1443
    """Perform some basic validation on data returned from a node.
1444

1445
      - check the result data structure is well formed and has all the
1446
        mandatory fields
1447
      - check ganeti version
1448

1449
    @type ninfo: L{objects.Node}
1450
    @param ninfo: the node to check
1451
    @param nresult: the results from the node
1452
    @rtype: boolean
1453
    @return: whether overall this call was successful (and we can expect
1454
         reasonable values in the response)
1455

1456
    """
1457
    node = ninfo.name
1458
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1459

    
1460
    # main result, nresult should be a non-empty dict
1461
    test = not nresult or not isinstance(nresult, dict)
1462
    _ErrorIf(test, self.ENODERPC, node,
1463
                  "unable to verify node: no data returned")
1464
    if test:
1465
      return False
1466

    
1467
    # compares ganeti version
1468
    local_version = constants.PROTOCOL_VERSION
1469
    remote_version = nresult.get("version", None)
1470
    test = not (remote_version and
1471
                isinstance(remote_version, (list, tuple)) and
1472
                len(remote_version) == 2)
1473
    _ErrorIf(test, self.ENODERPC, node,
1474
             "connection to node returned invalid data")
1475
    if test:
1476
      return False
1477

    
1478
    test = local_version != remote_version[0]
1479
    _ErrorIf(test, self.ENODEVERSION, node,
1480
             "incompatible protocol versions: master %s,"
1481
             " node %s", local_version, remote_version[0])
1482
    if test:
1483
      return False
1484

    
1485
    # node seems compatible, we can actually try to look into its results
1486

    
1487
    # full package version
1488
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1489
                  self.ENODEVERSION, node,
1490
                  "software version mismatch: master %s, node %s",
1491
                  constants.RELEASE_VERSION, remote_version[1],
1492
                  code=self.ETYPE_WARNING)
1493

    
1494
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1495
    if isinstance(hyp_result, dict):
1496
      for hv_name, hv_result in hyp_result.iteritems():
1497
        test = hv_result is not None
1498
        _ErrorIf(test, self.ENODEHV, node,
1499
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1500

    
1501

    
1502
    test = nresult.get(constants.NV_NODESETUP,
1503
                           ["Missing NODESETUP results"])
1504
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1505
             "; ".join(test))
1506

    
1507
    return True
1508

    
1509
  def _VerifyNodeTime(self, ninfo, nresult,
1510
                      nvinfo_starttime, nvinfo_endtime):
1511
    """Check the node time.
1512

1513
    @type ninfo: L{objects.Node}
1514
    @param ninfo: the node to check
1515
    @param nresult: the remote results for the node
1516
    @param nvinfo_starttime: the start time of the RPC call
1517
    @param nvinfo_endtime: the end time of the RPC call
1518

1519
    """
1520
    node = ninfo.name
1521
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1522

    
1523
    ntime = nresult.get(constants.NV_TIME, None)
1524
    try:
1525
      ntime_merged = utils.MergeTime(ntime)
1526
    except (ValueError, TypeError):
1527
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1528
      return
1529

    
1530
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1531
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1532
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1533
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1534
    else:
1535
      ntime_diff = None
1536

    
1537
    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1538
             "Node time diverges by at least %s from master node time",
1539
             ntime_diff)
1540

    
1541
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1542
    """Check the node time.
1543

1544
    @type ninfo: L{objects.Node}
1545
    @param ninfo: the node to check
1546
    @param nresult: the remote results for the node
1547
    @param vg_name: the configured VG name
1548

1549
    """
1550
    if vg_name is None:
1551
      return
1552

    
1553
    node = ninfo.name
1554
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1555

    
1556
    # checks vg existence and size > 20G
1557
    vglist = nresult.get(constants.NV_VGLIST, None)
1558
    test = not vglist
1559
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1560
    if not test:
1561
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1562
                                            constants.MIN_VG_SIZE)
1563
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1564

    
1565
    # check pv names
1566
    pvlist = nresult.get(constants.NV_PVLIST, None)
1567
    test = pvlist is None
1568
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1569
    if not test:
1570
      # check that ':' is not present in PV names, since it's a
1571
      # special character for lvcreate (denotes the range of PEs to
1572
      # use on the PV)
1573
      for _, pvname, owner_vg in pvlist:
1574
        test = ":" in pvname
1575
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1576
                 " '%s' of VG '%s'", pvname, owner_vg)
1577

    
1578
  def _VerifyNodeNetwork(self, ninfo, nresult):
1579
    """Check the node time.
1580

1581
    @type ninfo: L{objects.Node}
1582
    @param ninfo: the node to check
1583
    @param nresult: the remote results for the node
1584

1585
    """
1586
    node = ninfo.name
1587
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1588

    
1589
    test = constants.NV_NODELIST not in nresult
1590
    _ErrorIf(test, self.ENODESSH, node,
1591
             "node hasn't returned node ssh connectivity data")
1592
    if not test:
1593
      if nresult[constants.NV_NODELIST]:
1594
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1595
          _ErrorIf(True, self.ENODESSH, node,
1596
                   "ssh communication with node '%s': %s", a_node, a_msg)
1597

    
1598
    test = constants.NV_NODENETTEST not in nresult
1599
    _ErrorIf(test, self.ENODENET, node,
1600
             "node hasn't returned node tcp connectivity data")
1601
    if not test:
1602
      if nresult[constants.NV_NODENETTEST]:
1603
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1604
        for anode in nlist:
1605
          _ErrorIf(True, self.ENODENET, node,
1606
                   "tcp communication with node '%s': %s",
1607
                   anode, nresult[constants.NV_NODENETTEST][anode])
1608

    
1609
    test = constants.NV_MASTERIP not in nresult
1610
    _ErrorIf(test, self.ENODENET, node,
1611
             "node hasn't returned node master IP reachability data")
1612
    if not test:
1613
      if not nresult[constants.NV_MASTERIP]:
1614
        if node == self.master_node:
1615
          msg = "the master node cannot reach the master IP (not configured?)"
1616
        else:
1617
          msg = "cannot reach the master IP"
1618
        _ErrorIf(True, self.ENODENET, node, msg)
1619

    
1620

    
1621
  def _VerifyInstance(self, instance, instanceconfig, node_image):
1622
    """Verify an instance.
1623

1624
    This function checks to see if the required block devices are
1625
    available on the instance's node.
1626

1627
    """
1628
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1629
    node_current = instanceconfig.primary_node
1630

    
1631
    node_vol_should = {}
1632
    instanceconfig.MapLVsByNode(node_vol_should)
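    # node_vol_should now maps node name -> list of logical volumes this
    # instance expects there; illustrative shape only (hypothetical names):
    #   {"node1.example.com": ["xenvg/disk0_data", "xenvg/disk0_meta"]}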
1633

    
1634
    for node in node_vol_should:
1635
      n_img = node_image[node]
1636
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1637
        # ignore missing volumes on offline or broken nodes
1638
        continue
1639
      for volume in node_vol_should[node]:
1640
        test = volume not in n_img.volumes
1641
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1642
                 "volume %s missing on node %s", volume, node)
1643

    
1644
    if instanceconfig.admin_up:
1645
      pri_img = node_image[node_current]
1646
      test = instance not in pri_img.instances and not pri_img.offline
1647
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1648
               "instance not running on its primary node %s",
1649
               node_current)
1650

    
1651
    for node, n_img in node_image.items():
1652
      if node != node_current:
1653
        test = instance in n_img.instances
1654
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1655
                 "instance should not run on node %s", node)
1656

    
1657
  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
1658
    """Verify if there are any unknown volumes in the cluster.
1659

1660
    The .os, .swap and backup volumes are ignored. All other volumes are
1661
    reported as unknown.
1662

1663
    @type reserved: L{ganeti.utils.FieldSet}
1664
    @param reserved: a FieldSet of reserved volume names
1665

1666
    """
1667
    for node, n_img in node_image.items():
1668
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1669
        # skip non-healthy nodes
1670
        continue
1671
      for volume in n_img.volumes:
1672
        test = ((node not in node_vol_should or
1673
                volume not in node_vol_should[node]) and
1674
                not reserved.Matches(volume))
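        # a volume is an orphan only if no instance expects it on this node
        # and its name does not match any reserved pattern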
1675
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1676
                      "volume %s is unknown", volume)
1677

    
1678
  def _VerifyOrphanInstances(self, instancelist, node_image):
1679
    """Verify the list of running instances.
1680

1681
    This checks what instances are running but unknown to the cluster.
1682

1683
    """
1684
    for node, n_img in node_image.items():
1685
      for o_inst in n_img.instances:
1686
        test = o_inst not in instancelist
1687
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1688
                      "instance %s on node %s should not exist", o_inst, node)
1689

    
1690
  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1691
    """Verify N+1 Memory Resilience.
1692

1693
    Check that if one single node dies we can still start all the
1694
    instances it was primary for.
1695

1696
    """
1697
    for node, n_img in node_image.items():
1698
      # This code checks that every node which is now listed as
1699
      # secondary has enough memory to host all instances it is
1700
      # supposed to should a single other node in the cluster fail.
1701
      # FIXME: not ready for failover to an arbitrary node
1702
      # FIXME: does not support file-backed instances
1703
      # WARNING: we currently take into account down instances as well
1704
      # as up ones, considering that even if they're down someone
1705
      # might want to start them even in the event of a node failure.
1706
      for prinode, instances in n_img.sbp.items():
1707
        needed_mem = 0
1708
        for instance in instances:
1709
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1710
          if bep[constants.BE_AUTO_BALANCE]:
1711
            needed_mem += bep[constants.BE_MEMORY]
1712
        test = n_img.mfree < needed_mem
1713
        self._ErrorIf(test, self.ENODEN1, node,
1714
                      "not enough memory on to accommodate"
1715
                      " failovers should peer node %s fail", prinode)
1716

    
1717
  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
1718
                       master_files):
1719
    """Verifies and computes the node required file checksums.
1720

1721
    @type ninfo: L{objects.Node}
1722
    @param ninfo: the node to check
1723
    @param nresult: the remote results for the node
1724
    @param file_list: required list of files
1725
    @param local_cksum: dictionary of local files and their checksums
1726
    @param master_files: list of files that only masters should have
1727

1728
    """
1729
    node = ninfo.name
1730
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1731

    
1732
    remote_cksum = nresult.get(constants.NV_FILELIST, None)
1733
    test = not isinstance(remote_cksum, dict)
1734
    _ErrorIf(test, self.ENODEFILECHECK, node,
1735
             "node hasn't returned file checksum data")
1736
    if test:
1737
      return
1738

    
1739
    for file_name in file_list:
1740
      node_is_mc = ninfo.master_candidate
1741
      must_have = (file_name not in master_files) or node_is_mc
1742
      # missing
1743
      test1 = file_name not in remote_cksum
1744
      # invalid checksum
1745
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1746
      # existing and good
1747
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1748
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1749
               "file '%s' missing", file_name)
1750
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1751
               "file '%s' has wrong checksum", file_name)
1752
      # not candidate and this is not a must-have file
1753
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1754
               "file '%s' should not exist on non master"
1755
               " candidates (and the file is outdated)", file_name)
1756
      # all good, except non-master/non-must have combination
1757
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1758
               "file '%s' should not exist"
1759
               " on non master candidates", file_name)
1760

    
1761
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
1762
                      drbd_map):
1763
    """Verifies and the node DRBD status.
1764

1765
    @type ninfo: L{objects.Node}
1766
    @param ninfo: the node to check
1767
    @param nresult: the remote results for the node
1768
    @param instanceinfo: the dict of instances
1769
    @param drbd_helper: the configured DRBD usermode helper
1770
    @param drbd_map: the DRBD map as returned by
1771
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1772

1773
    """
1774
    node = ninfo.name
1775
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1776

    
1777
    if drbd_helper:
1778
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
1779
      test = (helper_result is None)
1780
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
1781
               "no drbd usermode helper returned")
1782
      if helper_result:
1783
        status, payload = helper_result
1784
        test = not status
1785
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
1786
                 "drbd usermode helper check unsuccessful: %s", payload)
1787
        test = status and (payload != drbd_helper)
1788
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
1789
                 "wrong drbd usermode helper: %s", payload)
1790

    
1791
    # compute the DRBD minors
1792
    node_drbd = {}
1793
    for minor, instance in drbd_map[node].items():
1794
      test = instance not in instanceinfo
1795
      _ErrorIf(test, self.ECLUSTERCFG, None,
1796
               "ghost instance '%s' in temporary DRBD map", instance)
1797
      # ghost instance should not be running, but otherwise we
1798
      # don't give double warnings (both ghost instance and
1799
      # unallocated minor in use)
1800
      if test:
1801
        node_drbd[minor] = (instance, False)
1802
      else:
1803
        instance = instanceinfo[instance]
1804
        node_drbd[minor] = (instance.name, instance.admin_up)
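    # node_drbd now maps each minor expected on this node to a
    # (instance name, should_be_active) pair; ghost instances were recorded
    # with should_be_active=False above so they don't also trigger the
    # "unallocated minor" warning below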
1805

    
1806
    # and now check them
1807
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
1808
    test = not isinstance(used_minors, (tuple, list))
1809
    _ErrorIf(test, self.ENODEDRBD, node,
1810
             "cannot parse drbd status file: %s", str(used_minors))
1811
    if test:
1812
      # we cannot check drbd status
1813
      return
1814

    
1815
    for minor, (iname, must_exist) in node_drbd.items():
1816
      test = minor not in used_minors and must_exist
1817
      _ErrorIf(test, self.ENODEDRBD, node,
1818
               "drbd minor %d of instance %s is not active", minor, iname)
1819
    for minor in used_minors:
1820
      test = minor not in node_drbd
1821
      _ErrorIf(test, self.ENODEDRBD, node,
1822
               "unallocated drbd minor %d is in use", minor)
1823

    
1824
  def _UpdateNodeOS(self, ninfo, nresult, nimg):
1825
    """Builds the node OS structures.
1826

1827
    @type ninfo: L{objects.Node}
1828
    @param ninfo: the node to check
1829
    @param nresult: the remote results for the node
1830
    @param nimg: the node image object
1831

1832
    """
1833
    node = ninfo.name
1834
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1835

    
1836
    remote_os = nresult.get(constants.NV_OSLIST, None)
1837
    test = (not isinstance(remote_os, list) or
1838
            not compat.all(isinstance(v, list) and len(v) == 7
1839
                           for v in remote_os))
1840

    
1841
    _ErrorIf(test, self.ENODEOS, node,
1842
             "node hasn't returned valid OS data")
1843

    
1844
    nimg.os_fail = test
1845

    
1846
    if test:
1847
      return
1848

    
1849
    os_dict = {}
1850

    
1851
    for (name, os_path, status, diagnose,
1852
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
1853

    
1854
      if name not in os_dict:
1855
        os_dict[name] = []
1856

    
1857
      # parameters is a list of lists instead of list of tuples due to
1858
      # JSON lacking a real tuple type, fix it:
1859
      parameters = [tuple(v) for v in parameters]
1860
      os_dict[name].append((os_path, status, diagnose,
1861
                            set(variants), set(parameters), set(api_ver)))
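      # each entry is a (path, status, diagnose, variants, parameters,
      # api_versions) tuple, one per occurrence of this OS name in the
      # node's OS search path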
1862

    
1863
    nimg.oslist = os_dict
1864

    
1865
  def _VerifyNodeOS(self, ninfo, nimg, base):
1866
    """Verifies the node OS list.
1867

1868
    @type ninfo: L{objects.Node}
1869
    @param ninfo: the node to check
1870
    @param nimg: the node image object
1871
    @param base: the 'template' node we match against (e.g. from the master)
1872

1873
    """
1874
    node = ninfo.name
1875
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1876

    
1877
    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
1878

    
1879
    for os_name, os_data in nimg.oslist.items():
1880
      assert os_data, "Empty OS status for OS %s?!" % os_name
1881
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
1882
      _ErrorIf(not f_status, self.ENODEOS, node,
1883
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
1884
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
1885
               "OS '%s' has multiple entries (first one shadows the rest): %s",
1886
               os_name, utils.CommaJoin([v[0] for v in os_data]))
1887
      # this will be caught in the backend too
1888
      _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
1889
               and not f_var, self.ENODEOS, node,
1890
               "OS %s with API at least %d does not declare any variant",
1891
               os_name, constants.OS_API_V15)
1892
      # comparisons with the 'base' image
1893
      test = os_name not in base.oslist
1894
      _ErrorIf(test, self.ENODEOS, node,
1895
               "Extra OS %s not present on reference node (%s)",
1896
               os_name, base.name)
1897
      if test:
1898
        continue
1899
      assert base.oslist[os_name], "Base node has empty OS status?"
1900
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
1901
      if not b_status:
1902
        # base OS is invalid, skipping
1903
        continue
1904
      for kind, a, b in [("API version", f_api, b_api),
1905
                         ("variants list", f_var, b_var),
1906
                         ("parameters", f_param, b_param)]:
1907
        _ErrorIf(a != b, self.ENODEOS, node,
1908
                 "OS %s %s differs from reference node %s: %s vs. %s",
1909
                 kind, os_name, base.name,
1910
                 utils.CommaJoin(a), utils.CommaJoin(b))
1911

    
1912
    # check any missing OSes
1913
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
1914
    _ErrorIf(missing, self.ENODEOS, node,
1915
             "OSes present on reference node %s but missing on this node: %s",
1916
             base.name, utils.CommaJoin(missing))
1917

    
1918
  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1919
    """Verifies and updates the node volume data.
1920

1921
    This function will update a L{NodeImage}'s internal structures
1922
    with data from the remote call.
1923

1924
    @type ninfo: L{objects.Node}
1925
    @param ninfo: the node to check
1926
    @param nresult: the remote results for the node
1927
    @param nimg: the node image object
1928
    @param vg_name: the configured VG name
1929

1930
    """
1931
    node = ninfo.name
1932
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1933

    
1934
    nimg.lvm_fail = True
1935
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1936
    if vg_name is None:
1937
      pass
1938
    elif isinstance(lvdata, basestring):
1939
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1940
               utils.SafeEncode(lvdata))
1941
    elif not isinstance(lvdata, dict):
1942
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1943
    else:
1944
      nimg.volumes = lvdata
1945
      nimg.lvm_fail = False
1946

    
1947
  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1948
    """Verifies and updates the node instance list.
1949

1950
    If the listing was successful, then updates this node's instance
1951
    list. Otherwise, it marks the RPC call as failed for the instance
1952
    list key.
1953

1954
    @type ninfo: L{objects.Node}
1955
    @param ninfo: the node to check
1956
    @param nresult: the remote results for the node
1957
    @param nimg: the node image object
1958

1959
    """
1960
    idata = nresult.get(constants.NV_INSTANCELIST, None)
1961
    test = not isinstance(idata, list)
1962
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1963
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
1964
    if test:
1965
      nimg.hyp_fail = True
1966
    else:
1967
      nimg.instances = idata
1968

    
1969
  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1970
    """Verifies and computes a node information map
1971

1972
    @type ninfo: L{objects.Node}
1973
    @param ninfo: the node to check
1974
    @param nresult: the remote results for the node
1975
    @param nimg: the node image object
1976
    @param vg_name: the configured VG name
1977

1978
    """
1979
    node = ninfo.name
1980
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1981

    
1982
    # try to read free memory (from the hypervisor)
1983
    hv_info = nresult.get(constants.NV_HVINFO, None)
1984
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1985
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1986
    if not test:
1987
      try:
1988
        nimg.mfree = int(hv_info["memory_free"])
1989
      except (ValueError, TypeError):
1990
        _ErrorIf(True, self.ENODERPC, node,
1991
                 "node returned invalid nodeinfo, check hypervisor")
1992

    
1993
    # FIXME: devise a free space model for file based instances as well
1994
    if vg_name is not None:
1995
      test = (constants.NV_VGLIST not in nresult or
1996
              vg_name not in nresult[constants.NV_VGLIST])
1997
      _ErrorIf(test, self.ENODELVM, node,
1998
               "node didn't return data for the volume group '%s'"
1999
               " - it is either missing or broken", vg_name)
2000
      if not test:
2001
        try:
2002
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
2003
        except (ValueError, TypeError):
2004
          _ErrorIf(True, self.ENODERPC, node,
2005
                   "node returned invalid LVM info, check LVM status")
2006

    
2007
  def BuildHooksEnv(self):
2008
    """Build hooks env.
2009

2010
    Cluster-Verify hooks are run only in the post phase; if they fail, their
2011
    output is logged in the verify output and the verification fails.
2012

2013
    """
2014
    all_nodes = self.cfg.GetNodeList()
2015
    env = {
2016
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
2017
      }
2018
    for node in self.cfg.GetAllNodesInfo().values():
2019
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
2020

    
2021
    return env, [], all_nodes
2022

    
2023
  def Exec(self, feedback_fn):
2024
    """Verify integrity of cluster, performing various test on nodes.
2025

2026
    """
2027
    self.bad = False
2028
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2029
    verbose = self.op.verbose
2030
    self._feedback_fn = feedback_fn
2031
    feedback_fn("* Verifying global settings")
2032
    for msg in self.cfg.VerifyConfig():
2033
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
2034

    
2035
    # Check the cluster certificates
2036
    for cert_filename in constants.ALL_CERT_FILES:
2037
      (errcode, msg) = _VerifyCertificate(cert_filename)
2038
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
2039

    
2040
    vg_name = self.cfg.GetVGName()
2041
    drbd_helper = self.cfg.GetDRBDHelper()
2042
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2043
    cluster = self.cfg.GetClusterInfo()
2044
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
2045
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
2046
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
2047
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
2048
                        for iname in instancelist)
2049
    i_non_redundant = [] # Non redundant instances
2050
    i_non_a_balanced = [] # Non auto-balanced instances
2051
    n_offline = 0 # Count of offline nodes
2052
    n_drained = 0 # Count of nodes being drained
2053
    node_vol_should = {}
2054

    
2055
    # FIXME: verify OS list
2056
    # do local checksums
2057
    master_files = [constants.CLUSTER_CONF_FILE]
2058
    master_node = self.master_node = self.cfg.GetMasterNode()
2059
    master_ip = self.cfg.GetMasterIP()
2060

    
2061
    file_names = ssconf.SimpleStore().GetFileList()
2062
    file_names.extend(constants.ALL_CERT_FILES)
2063
    file_names.extend(master_files)
2064
    if cluster.modify_etc_hosts:
2065
      file_names.append(constants.ETC_HOSTS)
2066

    
2067
    local_checksums = utils.FingerprintFiles(file_names)
2068

    
2069
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
2070
    node_verify_param = {
2071
      constants.NV_FILELIST: file_names,
2072
      constants.NV_NODELIST: [node.name for node in nodeinfo
2073
                              if not node.offline],
2074
      constants.NV_HYPERVISOR: hypervisors,
2075
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
2076
                                  node.secondary_ip) for node in nodeinfo
2077
                                 if not node.offline],
2078
      constants.NV_INSTANCELIST: hypervisors,
2079
      constants.NV_VERSION: None,
2080
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2081
      constants.NV_NODESETUP: None,
2082
      constants.NV_TIME: None,
2083
      constants.NV_MASTERIP: (master_node, master_ip),
2084
      constants.NV_OSLIST: None,
2085
      }
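    # node_verify_param is the per-node verification request passed to
    # rpc.call_node_verify below: keys are NV_* constants, values are the
    # check-specific arguments (None where the check needs no input)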
2086

    
2087
    if vg_name is not None:
2088
      node_verify_param[constants.NV_VGLIST] = None
2089
      node_verify_param[constants.NV_LVLIST] = vg_name
2090
      node_verify_param[constants.NV_PVLIST] = [vg_name]
2091
      node_verify_param[constants.NV_DRBDLIST] = None
2092

    
2093
    if drbd_helper:
2094
      node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
2095

    
2096
    # Build our expected cluster state
2097
    node_image = dict((node.name, self.NodeImage(offline=node.offline,
2098
                                                 name=node.name))
2099
                      for node in nodeinfo)
2100

    
2101
    for instance in instancelist:
2102
      inst_config = instanceinfo[instance]
2103

    
2104
      for nname in inst_config.all_nodes:
2105
        if nname not in node_image:
2106
          # ghost node
2107
          gnode = self.NodeImage(name=nname)
2108
          gnode.ghost = True
2109
          node_image[nname] = gnode
2110

    
2111
      inst_config.MapLVsByNode(node_vol_should)
2112

    
2113
      pnode = inst_config.primary_node
2114
      node_image[pnode].pinst.append(instance)
2115

    
2116
      for snode in inst_config.secondary_nodes:
2117
        nimg = node_image[snode]
2118
        nimg.sinst.append(instance)
2119
        if pnode not in nimg.sbp:
2120
          nimg.sbp[pnode] = []
2121
        nimg.sbp[pnode].append(instance)
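        # sbp (presumably "secondaries by primary") groups, per primary node,
        # the instances for which this node is a secondary; it is used later
        # by the N+1 memory check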
2122

    
2123
    # At this point, we have the in-memory data structures complete,
2124
    # except for the runtime information, which we'll gather next
2125

    
2126
    # Due to the way our RPC system works, exact response times cannot be
2127
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2128
    # time before and after executing the request, we can at least have a time
2129
    # window.
2130
    nvinfo_starttime = time.time()
2131
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
2132
                                           self.cfg.GetClusterName())
2133
    nvinfo_endtime = time.time()
2134

    
2135
    all_drbd_map = self.cfg.ComputeDRBDMap()
2136

    
2137
    feedback_fn("* Verifying node status")
2138

    
2139
    refos_img = None
2140

    
2141
    for node_i in nodeinfo:
2142
      node = node_i.name
2143
      nimg = node_image[node]
2144

    
2145
      if node_i.offline:
2146
        if verbose:
2147
          feedback_fn("* Skipping offline node %s" % (node,))
2148
        n_offline += 1
2149
        continue
2150

    
2151
      if node == master_node:
2152
        ntype = "master"
2153
      elif node_i.master_candidate:
2154
        ntype = "master candidate"
2155
      elif node_i.drained:
2156
        ntype = "drained"
2157
        n_drained += 1
2158
      else:
2159
        ntype = "regular"
2160
      if verbose:
2161
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2162

    
2163
      msg = all_nvinfo[node].fail_msg
2164
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2165
      if msg:
2166
        nimg.rpc_fail = True
2167
        continue
2168

    
2169
      nresult = all_nvinfo[node].payload
2170

    
2171
      nimg.call_ok = self._VerifyNode(node_i, nresult)
2172
      self._VerifyNodeNetwork(node_i, nresult)
2173
      self._VerifyNodeLVM(node_i, nresult, vg_name)
2174
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
2175
                            master_files)
2176
      self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
2177
                           all_drbd_map)
2178
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2179

    
2180
      self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2181
      self._UpdateNodeInstances(node_i, nresult, nimg)
2182
      self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2183
      self._UpdateNodeOS(node_i, nresult, nimg)
2184
      if not nimg.os_fail:
2185
        if refos_img is None:
2186
          refos_img = nimg
2187
        self._VerifyNodeOS(node_i, nimg, refos_img)
2188

    
2189
    feedback_fn("* Verifying instance status")
2190
    for instance in instancelist:
2191
      if verbose:
2192
        feedback_fn("* Verifying instance %s" % instance)
2193
      inst_config = instanceinfo[instance]
2194
      self._VerifyInstance(instance, inst_config, node_image)
2195
      inst_nodes_offline = []
2196

    
2197
      pnode = inst_config.primary_node
2198
      pnode_img = node_image[pnode]
2199
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2200
               self.ENODERPC, pnode, "instance %s, connection to"
2201
               " primary node failed", instance)
2202

    
2203
      if pnode_img.offline:
2204
        inst_nodes_offline.append(pnode)
2205

    
2206
      # If the instance is non-redundant we cannot survive losing its primary
2207
      # node, so we are not N+1 compliant. On the other hand we have no disk
2208
      # templates with more than one secondary so that situation is not well
2209
      # supported either.
2210
      # FIXME: does not support file-backed instances
2211
      if not inst_config.secondary_nodes:
2212
        i_non_redundant.append(instance)
2213
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2214
               instance, "instance has multiple secondary nodes: %s",
2215
               utils.CommaJoin(inst_config.secondary_nodes),
2216
               code=self.ETYPE_WARNING)
2217

    
2218
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
2219
        i_non_a_balanced.append(instance)
2220

    
2221
      for snode in inst_config.secondary_nodes:
2222
        s_img = node_image[snode]
2223
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
2224
                 "instance %s, connection to secondary node failed", instance)
2225

    
2226
        if s_img.offline:
2227
          inst_nodes_offline.append(snode)
2228

    
2229
      # warn that the instance lives on offline nodes
2230
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
2231
               "instance lives on offline node(s) %s",
2232
               utils.CommaJoin(inst_nodes_offline))
2233
      # ... or ghost nodes
2234
      for node in inst_config.all_nodes:
2235
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
2236
                 "instance lives on ghost node %s", node)
2237

    
2238
    feedback_fn("* Verifying orphan volumes")
2239
    reserved = utils.FieldSet(*cluster.reserved_lvs)
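    # reserved_lvs holds name patterns; _VerifyOrphanVolumes skips any LV
    # whose name matches one of them (assuming utils.FieldSet does
    # pattern-based name matching)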
2240
    self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2241

    
2242
    feedback_fn("* Verifying orphan instances")
2243
    self._VerifyOrphanInstances(instancelist, node_image)
2244

    
2245
    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2246
      feedback_fn("* Verifying N+1 Memory redundancy")
2247
      self._VerifyNPlusOneMemory(node_image, instanceinfo)
2248

    
2249
    feedback_fn("* Other Notes")
2250
    if i_non_redundant:
2251
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
2252
                  % len(i_non_redundant))
2253

    
2254
    if i_non_a_balanced:
2255
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
2256
                  % len(i_non_a_balanced))
2257

    
2258
    if n_offline:
2259
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
2260

    
2261
    if n_drained:
2262
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
2263

    
2264
    return not self.bad
2265

    
2266
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
2267
    """Analyze the post-hooks' result
2268

2269
    This method analyses the hook result, handles it, and sends some
2270
    nicely-formatted feedback back to the user.
2271

2272
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
2273
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
2274
    @param hooks_results: the results of the multi-node hooks rpc call
2275
    @param feedback_fn: function used to send feedback back to the caller
2276
    @param lu_result: previous Exec result
2277
    @return: the new Exec result, based on the previous result
2278
        and hook results
2279

2280
    """
2281
    # We only really run POST phase hooks, and are only interested in
2282
    # their results
2283
    if phase == constants.HOOKS_PHASE_POST:
2284
      # Used to change hooks' output to proper indentation
2285
      indent_re = re.compile('^', re.M)
2286
      feedback_fn("* Hooks Results")
2287
      assert hooks_results, "invalid result from hooks"
2288

    
2289
      for node_name in hooks_results:
2290
        res = hooks_results[node_name]
2291
        msg = res.fail_msg
2292
        test = msg and not res.offline
2293
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
2294
                      "Communication failure in hooks execution: %s", msg)
2295
        if res.offline or msg:
2296
          # No need to investigate payload if node is offline or gave an error.
2297
          # override manually lu_result here as _ErrorIf only
2298
          # overrides self.bad
2299
          lu_result = 1
2300
          continue
2301
        for script, hkr, output in res.payload:
2302
          test = hkr == constants.HKR_FAIL
2303
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
2304
                        "Script %s failed, output:", script)
2305
          if test:
2306
            output = indent_re.sub('      ', output)
2307
            feedback_fn("%s" % output)
2308
            lu_result = 0
2309

    
2310
      return lu_result
2311

    
2312

    
2313
class LUVerifyDisks(NoHooksLU):
2314
  """Verifies the cluster disks status.
2315

2316
  """
2317
  REQ_BGL = False
2318

    
2319
  def ExpandNames(self):
2320
    self.needed_locks = {
2321
      locking.LEVEL_NODE: locking.ALL_SET,
2322
      locking.LEVEL_INSTANCE: locking.ALL_SET,
2323
    }
2324
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
2325

    
2326
  def Exec(self, feedback_fn):
2327
    """Verify integrity of cluster disks.
2328

2329
    @rtype: tuple of three items
2330
    @return: a tuple of (dict of node-to-node_error, list of instances
2331
        which need activate-disks, dict of instance: (node, volume) for
2332
        missing volumes)
2333

2334
    """
2335
    result = res_nodes, res_instances, res_missing = {}, [], {}
2336

    
2337
    vg_name = self.cfg.GetVGName()
2338
    nodes = utils.NiceSort(self.cfg.GetNodeList())
2339
    instances = [self.cfg.GetInstanceInfo(name)
2340
                 for name in self.cfg.GetInstanceList()]
2341

    
2342
    nv_dict = {}
2343
    for inst in instances:
2344
      inst_lvs = {}
2345
      if (not inst.admin_up or
2346
          inst.disk_template not in constants.DTS_NET_MIRROR):
2347
        continue
2348
      inst.MapLVsByNode(inst_lvs)
2349
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
2350
      for node, vol_list in inst_lvs.iteritems():
2351
        for vol in vol_list:
2352
          nv_dict[(node, vol)] = inst
2353

    
2354
    if not nv_dict:
2355
      return result
2356

    
2357
    node_lvs = self.rpc.call_lv_list(nodes, vg_name)
2358

    
2359
    for node in nodes:
2360
      # node_volume
2361
      node_res = node_lvs[node]
2362
      if node_res.offline:
2363
        continue
2364
      msg = node_res.fail_msg
2365
      if msg:
2366
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2367
        res_nodes[node] = msg
2368
        continue
2369

    
2370
      lvs = node_res.payload
2371
      for lv_name, (_, _, lv_online) in lvs.items():
2372
        inst = nv_dict.pop((node, lv_name), None)
2373
        if (not lv_online and inst is not None
2374
            and inst.name not in res_instances):
2375
          res_instances.append(inst.name)
2376

    
2377
    # any leftover items in nv_dict are missing LVs, let's arrange the
2378
    # data better
2379
    for key, inst in nv_dict.iteritems():
2380
      if inst.name not in res_missing:
2381
        res_missing[inst.name] = []
2382
      res_missing[inst.name].append(key)
2383

    
2384
    return result
2385

    
2386

    
2387
class LURepairDiskSizes(NoHooksLU):
2388
  """Verifies the cluster disks sizes.
2389

2390
  """
2391
  _OP_PARAMS = [("instances", _EmptyList, _TListOf(_TNonEmptyString))]
2392
  REQ_BGL = False
2393

    
2394
  def ExpandNames(self):
2395
    if self.op.instances:
2396
      self.wanted_names = []
2397
      for name in self.op.instances:
2398
        full_name = _ExpandInstanceName(self.cfg, name)
2399
        self.wanted_names.append(full_name)
2400
      self.needed_locks = {
2401
        locking.LEVEL_NODE: [],
2402
        locking.LEVEL_INSTANCE: self.wanted_names,
2403
        }
2404
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2405
    else:
2406
      self.wanted_names = None
2407
      self.needed_locks = {
2408
        locking.LEVEL_NODE: locking.ALL_SET,
2409
        locking.LEVEL_INSTANCE: locking.ALL_SET,
2410
        }
2411
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2412

    
2413
  def DeclareLocks(self, level):
2414
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
2415
      self._LockInstancesNodes(primary_only=True)
2416

    
2417
  def CheckPrereq(self):
2418
    """Check prerequisites.
2419

2420
    This only checks the optional instance list against the existing names.
2421

2422
    """
2423
    if self.wanted_names is None:
2424
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2425

    
2426
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2427
                             in self.wanted_names]
2428

    
2429
  def _EnsureChildSizes(self, disk):
2430
    """Ensure children of the disk have the needed disk size.
2431

2432
    This is valid mainly for DRBD8 and fixes an issue where the
2433
    children have a smaller disk size than the parent.
2434

2435
    @param disk: an L{ganeti.objects.Disk} object
2436

2437
    """
2438
    if disk.dev_type == constants.LD_DRBD8:
2439
      assert disk.children, "Empty children for DRBD8?"
2440
      fchild = disk.children[0]
2441
      mismatch = fchild.size < disk.size
2442
      if mismatch:
2443
        self.LogInfo("Child disk has size %d, parent %d, fixing",
2444
                     fchild.size, disk.size)
2445
        fchild.size = disk.size
2446

    
2447
      # and we recurse on this child only, not on the metadev
2448
      return self._EnsureChildSizes(fchild) or mismatch
2449
    else:
2450
      return False
2451

    
2452
  def Exec(self, feedback_fn):
2453
    """Verify the size of cluster disks.
2454

2455
    """
2456
    # TODO: check child disks too
2457
    # TODO: check differences in size between primary/secondary nodes
2458
    per_node_disks = {}
2459
    for instance in self.wanted_instances:
2460
      pnode = instance.primary_node
2461
      if pnode not in per_node_disks:
2462
        per_node_disks[pnode] = []
2463
      for idx, disk in enumerate(instance.disks):
2464
        per_node_disks[pnode].append((instance, idx, disk))
2465

    
2466
    changed = []
2467
    for node, dskl in per_node_disks.items():
2468
      newl = [v[2].Copy() for v in dskl]
2469
      for dsk in newl:
2470
        self.cfg.SetDiskID(dsk, node)
2471
      result = self.rpc.call_blockdev_getsizes(node, newl)
2472
      if result.fail_msg:
2473
        self.LogWarning("Failure in blockdev_getsizes call to node"
2474
                        " %s, ignoring", node)
2475
        continue
2476
      if len(result.data) != len(dskl):
2477
        self.LogWarning("Invalid result from node %s, ignoring node results",
2478
                        node)
2479
        continue
2480
      for ((instance, idx, disk), size) in zip(dskl, result.data):
2481
        if size is None:
2482
          self.LogWarning("Disk %d of instance %s did not return size"
2483
                          " information, ignoring", idx, instance.name)
2484
          continue
2485
        if not isinstance(size, (int, long)):
2486
          self.LogWarning("Disk %d of instance %s did not return valid"
2487
                          " size information, ignoring", idx, instance.name)
2488
          continue
2489
        size = size >> 20
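        # the RPC reports sizes in bytes; shifting right by 20 bits converts
        # to mebibytes, the unit disk.size is recorded in (unit assumption
        # inferred from the conversion)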
2490
        if size != disk.size:
2491
          self.LogInfo("Disk %d of instance %s has mismatched size,"
2492
                       " correcting: recorded %d, actual %d", idx,
2493
                       instance.name, disk.size, size)
2494
          disk.size = size
2495
          self.cfg.Update(instance, feedback_fn)
2496
          changed.append((instance.name, idx, size))
2497
        if self._EnsureChildSizes(disk):
2498
          self.cfg.Update(instance, feedback_fn)
2499
          changed.append((instance.name, idx, disk.size))
2500
    return changed
2501

    
2502

    
2503
class LURenameCluster(LogicalUnit):
2504
  """Rename the cluster.
2505

2506
  """
2507
  HPATH = "cluster-rename"
2508
  HTYPE = constants.HTYPE_CLUSTER
2509
  _OP_PARAMS = [("name", _NoDefault, _TNonEmptyString)]
2510

    
2511
  def BuildHooksEnv(self):
2512
    """Build hooks env.
2513

2514
    """
2515
    env = {
2516
      "OP_TARGET": self.cfg.GetClusterName(),
2517
      "NEW_NAME": self.op.name,
2518
      }
2519
    mn = self.cfg.GetMasterNode()
2520
    all_nodes = self.cfg.GetNodeList()
2521
    return env, [mn], all_nodes
2522

    
2523
  def CheckPrereq(self):
2524
    """Verify that the passed name is a valid one.
2525

2526
    """
2527
    hostname = netutils.GetHostInfo(self.op.name)
2528

    
2529
    new_name = hostname.name
2530
    self.ip = new_ip = hostname.ip
2531
    old_name = self.cfg.GetClusterName()
2532
    old_ip = self.cfg.GetMasterIP()
2533
    if new_name == old_name and new_ip == old_ip:
2534
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
2535
                                 " cluster has changed",
2536
                                 errors.ECODE_INVAL)
2537
    if new_ip != old_ip:
2538
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2539
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
2540
                                   " reachable on the network. Aborting." %
2541
                                   new_ip, errors.ECODE_NOTUNIQUE)
2542

    
2543
    self.op.name = new_name
2544

    
2545
  def Exec(self, feedback_fn):
2546
    """Rename the cluster.
2547

2548
    """
2549
    clustername = self.op.name
2550
    ip = self.ip
2551

    
2552
    # shutdown the master IP
2553
    master = self.cfg.GetMasterNode()
2554
    result = self.rpc.call_node_stop_master(master, False)
2555
    result.Raise("Could not disable the master role")
2556

    
2557
    try:
2558
      cluster = self.cfg.GetClusterInfo()
2559
      cluster.cluster_name = clustername
2560
      cluster.master_ip = ip
2561
      self.cfg.Update(cluster, feedback_fn)
2562

    
2563
      # update the known hosts file
2564
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2565
      node_list = self.cfg.GetNodeList()
2566
      try:
2567
        node_list.remove(master)
2568
      except ValueError:
2569
        pass
2570
      result = self.rpc.call_upload_file(node_list,
2571
                                         constants.SSH_KNOWN_HOSTS_FILE)
2572
      for to_node, to_result in result.iteritems():
2573
        msg = to_result.fail_msg
2574
        if msg:
2575
          msg = ("Copy of file %s to node %s failed: %s" %
2576
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
2577
          self.proc.LogWarning(msg)
2578

    
2579
    finally:
2580
      result = self.rpc.call_node_start_master(master, False, False)
2581
      msg = result.fail_msg
2582
      if msg:
2583
        self.LogWarning("Could not re-enable the master role on"
2584
                        " the master, please restart manually: %s", msg)
2585

    
2586

    
2587
class LUSetClusterParams(LogicalUnit):
2588
  """Change the parameters of the cluster.
2589

2590
  """
2591
  HPATH = "cluster-modify"
2592
  HTYPE = constants.HTYPE_CLUSTER
2593
  _OP_PARAMS = [
2594
    ("vg_name", None, _TMaybeString),
2595
    ("enabled_hypervisors", None,
2596
     _TOr(_TAnd(_TListOf(_TElemOf(constants.HYPER_TYPES)), _TTrue), _TNone)),
2597
    ("hvparams", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
2598
    ("beparams", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
2599
    ("os_hvp", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
2600
    ("osparams", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
2601
    ("candidate_pool_size", None, _TOr(_TStrictPositiveInt, _TNone)),
2602
    ("uid_pool", None, _NoType),
2603
    ("add_uids", None, _NoType),
2604
    ("remove_uids", None, _NoType),
2605
    ("maintain_node_health", None, _TMaybeBool),
2606
    ("nicparams", None, _TOr(_TDict, _TNone)),
2607
    ("drbd_helper", None, _TOr(_TString, _TNone)),
2608
    ("default_iallocator", None, _TMaybeString),
2609
    ("reserved_lvs", None, _TOr(_TListOf(_TNonEmptyString), _TNone)),
2610
    ]
2611
  REQ_BGL = False
2612

    
2613
  def CheckArguments(self):
2614
    """Check parameters
2615

2616
    """
2617
    if self.op.uid_pool:
2618
      uidpool.CheckUidPool(self.op.uid_pool)
2619

    
2620
    if self.op.add_uids:
2621
      uidpool.CheckUidPool(self.op.add_uids)
2622

    
2623
    if self.op.remove_uids:
2624
      uidpool.CheckUidPool(self.op.remove_uids)
2625

    
2626
  def ExpandNames(self):
2627
    # FIXME: in the future maybe other cluster params won't require checking on
2628
    # all nodes to be modified.
2629
    self.needed_locks = {
2630
      locking.LEVEL_NODE: locking.ALL_SET,
2631
    }
2632
    self.share_locks[locking.LEVEL_NODE] = 1
2633

    
2634
  def BuildHooksEnv(self):
2635
    """Build hooks env.
2636

2637
    """
2638
    env = {
2639
      "OP_TARGET": self.cfg.GetClusterName(),
2640
      "NEW_VG_NAME": self.op.vg_name,
2641
      }
2642
    mn = self.cfg.GetMasterNode()
2643
    return env, [mn], [mn]
2644

    
2645
  def CheckPrereq(self):
2646
    """Check prerequisites.
2647

2648
    This checks that the given parameters do not conflict and
2649
    that the given volume group is valid.
2650

2651
    """
2652
    if self.op.vg_name is not None and not self.op.vg_name:
2653
      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
2654
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
2655
                                   " instances exist", errors.ECODE_INVAL)
2656

    
2657
    if self.op.drbd_helper is not None and not self.op.drbd_helper:
2658
      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
2659
        raise errors.OpPrereqError("Cannot disable drbd helper while"
2660
                                   " drbd-based instances exist",
2661
                                   errors.ECODE_INVAL)
2662

    
2663
    node_list = self.acquired_locks[locking.LEVEL_NODE]
2664

    
2665
    # if vg_name is not None, check the given volume group on all nodes
2666
    if self.op.vg_name:
2667
      vglist = self.rpc.call_vg_list(node_list)
2668
      for node in node_list:
2669
        msg = vglist[node].fail_msg
2670
        if msg:
2671
          # ignoring down node
2672
          self.LogWarning("Error while gathering data on node %s"
2673
                          " (ignoring node): %s", node, msg)
2674
          continue
2675
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2676
                                              self.op.vg_name,
2677
                                              constants.MIN_VG_SIZE)
2678
        if vgstatus:
2679
          raise errors.OpPrereqError("Error on node '%s': %s" %
2680
                                     (node, vgstatus), errors.ECODE_ENVIRON)
2681

    
2682
    if self.op.drbd_helper:
2683
      # checks given drbd helper on all nodes
2684
      helpers = self.rpc.call_drbd_helper(node_list)
2685
      for node in node_list:
2686
        ninfo = self.cfg.GetNodeInfo(node)
2687
        if ninfo.offline:
2688
          self.LogInfo("Not checking drbd helper on offline node %s", node)
2689
          continue
2690
        msg = helpers[node].fail_msg
2691
        if msg:
2692
          raise errors.OpPrereqError("Error checking drbd helper on node"
2693
                                     " '%s': %s" % (node, msg),
2694
                                     errors.ECODE_ENVIRON)
2695
        node_helper = helpers[node].payload
2696
        if node_helper != self.op.drbd_helper:
2697
          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
2698
                                     (node, node_helper), errors.ECODE_ENVIRON)
2699

    
2700
    self.cluster = cluster = self.cfg.GetClusterInfo()
2701
    # validate params changes
2702
    if self.op.beparams:
2703
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2704
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
2705

    
2706
    if self.op.nicparams:
2707
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2708
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
2709
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
2710
      nic_errors = []
2711

    
2712
      # check all instances for consistency
2713
      for instance in self.cfg.GetAllInstancesInfo().values():
2714
        for nic_idx, nic in enumerate(instance.nics):
2715
          params_copy = copy.deepcopy(nic.nicparams)
2716
          params_filled = objects.FillDict(self.new_nicparams, params_copy)
2717

    
2718
          # check parameter syntax
2719
          try:
2720
            objects.NIC.CheckParameterSyntax(params_filled)
2721
          except errors.ConfigurationError, err:
2722
            nic_errors.append("Instance %s, nic/%d: %s" %
2723
                              (instance.name, nic_idx, err))
2724

    
2725
          # if we're moving instances to routed, check that they have an ip
2726
          target_mode = params_filled[constants.NIC_MODE]
2727
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2728
            nic_errors.append("Instance %s, nic/%d: routed nick with no ip" %
2729
                              (instance.name, nic_idx))
2730
      if nic_errors:
2731
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2732
                                   "\n".join(nic_errors))
2733

    
2734
    # hypervisor list/parameters
2735
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
2736
    if self.op.hvparams:
2737
      for hv_name, hv_dict in self.op.hvparams.items():
2738
        if hv_name not in self.new_hvparams:
2739
          self.new_hvparams[hv_name] = hv_dict
2740
        else:
2741
          self.new_hvparams[hv_name].update(hv_dict)
2742

    
2743
    # os hypervisor parameters
2744
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2745
    if self.op.os_hvp:
2746
      for os_name, hvs in self.op.os_hvp.items():
2747
        if os_name not in self.new_os_hvp:
2748
          self.new_os_hvp[os_name] = hvs
2749
        else:
2750
          for hv_name, hv_dict in hvs.items():
2751
            if hv_name not in self.new_os_hvp[os_name]:
2752
              self.new_os_hvp[os_name][hv_name] = hv_dict
2753
            else:
2754
              self.new_os_hvp[os_name][hv_name].update(hv_dict)
2755

    
2756
    # os parameters
2757
    self.new_osp = objects.FillDict(cluster.osparams, {})
2758
    if self.op.osparams:
2759
      for os_name, osp in self.op.osparams.items():
2760
        if os_name not in self.new_osp:
2761
          self.new_osp[os_name] = {}
2762

    
2763
        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
2764
                                                  use_none=True)
2765

    
2766
        if not self.new_osp[os_name]:
2767
          # we removed all parameters
2768
          del self.new_osp[os_name]
2769
        else:
2770
          # check the parameter validity (remote check)
2771
          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
2772
                         os_name, self.new_osp[os_name])
2773

    
2774
    # changes to the hypervisor list
2775
    if self.op.enabled_hypervisors is not None:
2776
      self.hv_list = self.op.enabled_hypervisors
2777
      for hv in self.hv_list:
2778
        # if the hypervisor doesn't already exist in the cluster
2779
        # hvparams, we initialize it to empty, and then (in both
2780
        # cases) we make sure to fill the defaults, as we might not
2781
        # have a complete defaults list if the hypervisor wasn't
2782
        # enabled before
2783
        if hv not in new_hvp:
2784
          new_hvp[hv] = {}
2785
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
2786
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
2787
    else:
2788
      self.hv_list = cluster.enabled_hypervisors
2789

    
2790
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
2791
      # either the enabled list has changed, or the parameters have, validate
2792
      for hv_name, hv_params in self.new_hvparams.items():
2793
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
2794
            (self.op.enabled_hypervisors and
2795
             hv_name in self.op.enabled_hypervisors)):
2796
          # either this is a new hypervisor, or its parameters have changed
2797
          hv_class = hypervisor.GetHypervisor(hv_name)
2798
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2799
          hv_class.CheckParameterSyntax(hv_params)
2800
          _CheckHVParams(self, node_list, hv_name, hv_params)
2801

    
2802
    if self.op.os_hvp:
2803
      # no need to check any newly-enabled hypervisors, since the
2804
      # defaults have already been checked in the above code-block
2805
      for os_name, os_hvp in self.new_os_hvp.items():
2806
        for hv_name, hv_params in os_hvp.items():
2807
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2808
          # we need to fill in the new os_hvp on top of the actual hv_p
2809
          cluster_defaults = self.new_hvparams.get(hv_name, {})
2810
          new_osp = objects.FillDict(cluster_defaults, hv_params)
2811
          hv_class = hypervisor.GetHypervisor(hv_name)
2812
          hv_class.CheckParameterSyntax(new_osp)
2813
          _CheckHVParams(self, node_list, hv_name, new_osp)
2814

    
2815
    if self.op.default_iallocator:
2816
      alloc_script = utils.FindFile(self.op.default_iallocator,
2817
                                    constants.IALLOCATOR_SEARCH_PATH,
2818
                                    os.path.isfile)
2819
      if alloc_script is None:
2820
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
2821
                                   " specified" % self.op.default_iallocator,
2822
                                   errors.ECODE_INVAL)
2823

    
2824
  def Exec(self, feedback_fn):
2825
    """Change the parameters of the cluster.
2826

2827
    """
2828
    if self.op.vg_name is not None:
2829
      new_volume = self.op.vg_name
2830
      if not new_volume:
2831
        new_volume = None
2832
      if new_volume != self.cfg.GetVGName():
2833
        self.cfg.SetVGName(new_volume)
2834
      else:
2835
        feedback_fn("Cluster LVM configuration already in desired"
2836
                    " state, not changing")
2837
    if self.op.drbd_helper is not None:
2838
      new_helper = self.op.drbd_helper
2839
      if not new_helper:
2840
        new_helper = None
2841
      if new_helper != self.cfg.GetDRBDHelper():
2842
        self.cfg.SetDRBDHelper(new_helper)
2843
      else:
2844
        feedback_fn("Cluster DRBD helper already in desired state,"
2845
                    " not changing")
2846
    if self.op.hvparams:
2847
      self.cluster.hvparams = self.new_hvparams
2848
    if self.op.os_hvp:
2849
      self.cluster.os_hvp = self.new_os_hvp
2850
    if self.op.enabled_hypervisors is not None:
2851
      self.cluster.hvparams = self.new_hvparams
2852
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2853
    if self.op.beparams:
2854
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2855
    if self.op.nicparams:
2856
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2857
    if self.op.osparams:
2858
      self.cluster.osparams = self.new_osp
2859

    
2860
    if self.op.candidate_pool_size is not None:
2861
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
2862
      # we need to update the pool size here, otherwise the save will fail
2863
      _AdjustCandidatePool(self, [])
2864

    
2865
    if self.op.maintain_node_health is not None:
2866
      self.cluster.maintain_node_health = self.op.maintain_node_health
2867

    
2868
    if self.op.add_uids is not None:
2869
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
2870

    
2871
    if self.op.remove_uids is not None:
2872
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
2873

    
2874
    if self.op.uid_pool is not None:
2875
      self.cluster.uid_pool = self.op.uid_pool
2876

    
2877
    if self.op.default_iallocator is not None:
2878
      self.cluster.default_iallocator = self.op.default_iallocator
2879

    
2880
    if self.op.reserved_lvs is not None:
2881
      self.cluster.reserved_lvs = self.op.reserved_lvs
2882

    
2883
    self.cfg.Update(self.cluster, feedback_fn)
2884

    
2885

    
2886
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
2887
  """Distribute additional files which are part of the cluster configuration.
2888

2889
  ConfigWriter takes care of distributing the config and ssconf files, but
2890
  there are more files which should be distributed to all nodes. This function
2891
  makes sure those are copied.
2892

2893
  @param lu: calling logical unit
2894
  @param additional_nodes: list of nodes not in the config to distribute to
2895

2896
  """
2897
  # 1. Gather target nodes
2898
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
2899
  dist_nodes = lu.cfg.GetOnlineNodeList()
2900
  if additional_nodes is not None:
2901
    dist_nodes.extend(additional_nodes)
2902
  if myself.name in dist_nodes:
2903
    dist_nodes.remove(myself.name)
2904

    
2905
  # 2. Gather files to distribute
2906
  dist_files = set([constants.ETC_HOSTS,
2907
                    constants.SSH_KNOWN_HOSTS_FILE,
2908
                    constants.RAPI_CERT_FILE,
2909
                    constants.RAPI_USERS_FILE,
2910
                    constants.CONFD_HMAC_KEY,
2911
                    constants.CLUSTER_DOMAIN_SECRET_FILE,
2912
                   ])
2913

    
2914
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
2915
  for hv_name in enabled_hypervisors:
2916
    hv_class = hypervisor.GetHypervisor(hv_name)
2917
    dist_files.update(hv_class.GetAncillaryFiles())
2918

    
2919
  # 3. Perform the files upload
2920
  for fname in dist_files:
2921
    if os.path.exists(fname):
2922
      result = lu.rpc.call_upload_file(dist_nodes, fname)
2923
      for to_node, to_result in result.items():
2924
        msg = to_result.fail_msg
2925
        if msg:
2926
          msg = ("Copy of file %s to node %s failed: %s" %
2927
                 (fname, to_node, msg))
2928
          lu.proc.LogWarning(msg)
2929
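# Usage sketch for _RedistributeAncillaryFiles (the node name below is
# purely illustrative): a logical unit that has just added a node which is
# not yet in the configuration would push the files to it explicitly with
#
#   _RedistributeAncillaryFiles(self, additional_nodes=["node4.example.com"])
#
# while a plain redistribution, e.g. after a cluster parameter change,
# simply calls _RedistributeAncillaryFiles(self).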

    
2930

    
2931
class LURedistributeConfig(NoHooksLU):
2932
  """Force the redistribution of cluster configuration.
2933

2934
  This is a very simple LU.
2935

2936
  """
2937
  REQ_BGL = False
2938

    
2939
  def ExpandNames(self):
2940
    self.needed_locks = {
2941
      locking.LEVEL_NODE: locking.ALL_SET,
2942
    }
2943
    self.share_locks[locking.LEVEL_NODE] = 1
2944

    
2945
  def Exec(self, feedback_fn):
2946
    """Redistribute the configuration.
2947

2948
    """
2949
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
2950
    _RedistributeAncillaryFiles(self)
2951

    
2952

    
2953
def _WaitForSync(lu, instance, disks=None, oneshot=False):
2954
  """Sleep and poll for an instance's disk to sync.
2955

2956
  """
2957
  if not instance.disks or disks is not None and not disks:
2958
    return True
2959

    
2960
  disks = _ExpandCheckDisks(instance, disks)
2961

    
2962
  if not oneshot:
2963
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
2964

    
2965
  node = instance.primary_node
2966

    
2967
  for dev in disks:
2968
    lu.cfg.SetDiskID(dev, node)
2969

    
2970
  # TODO: Convert to utils.Retry
2971

    
2972
  retries = 0
2973
  degr_retries = 10 # in seconds, as we sleep 1 second each time
2974
  while True:
2975
    max_time = 0
2976
    done = True
2977
    cumul_degraded = False
2978
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
2979
    msg = rstats.fail_msg
2980
    if msg:
2981
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
2982
      retries += 1
2983
      if retries >= 10:
2984
        raise errors.RemoteError("Can't contact node %s for mirror data,"
2985
                                 " aborting." % node)
2986
      time.sleep(6)
2987
      continue
2988
    rstats = rstats.payload
2989
    retries = 0
2990
    for i, mstat in enumerate(rstats):
2991
      if mstat is None:
2992
        lu.LogWarning("Can't compute data for node %s/%s",
2993
                           node, disks[i].iv_name)
2994
        continue
2995

    
2996
      cumul_degraded = (cumul_degraded or
2997
                        (mstat.is_degraded and mstat.sync_percent is None))
2998
      if mstat.sync_percent is not None:
2999
        done = False
3000
        if mstat.estimated_time is not None:
3001
          rem_time = ("%s remaining (estimated)" %
3002
                      utils.FormatSeconds(mstat.estimated_time))
3003
          max_time = mstat.estimated_time
3004
        else:
3005
          rem_time = "no time estimate"
3006
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
3007
                        (disks[i].iv_name, mstat.sync_percent, rem_time))
3008

    
3009
    # if we're done but degraded, let's do a few small retries, to
3010
    # make sure we see a stable and not transient situation; therefore
3011
    # we force restart of the loop
3012
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
3013
      logging.info("Degraded disks found, %d retries left", degr_retries)
3014
      degr_retries -= 1
3015
      time.sleep(1)
3016
      continue
3017

    
3018
    if done or oneshot:
3019
      break
3020

    
3021
    time.sleep(min(60, max_time))
3022

    
3023
  if done:
3024
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
3025
  return not cumul_degraded
3026
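# Timing notes for _WaitForSync above, derived from the constants used in
# the loop (a worked example, not a guarantee):
#  - an RPC failure towards the primary node is retried up to 10 times with
#    a 6 second pause in between, i.e. roughly a minute before giving up;
#  - while syncing, the next poll happens after min(60, max_time) seconds,
#    so an estimated 90 seconds remaining still means a 60 second sleep,
#    while 15 seconds remaining means a 15 second sleep;
#  - once the devices report done but degraded, up to 10 extra passes of
#    one second each are made to confirm the state is not just transient.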

    
3027

    
3028
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
3029
  """Check that mirrors are not degraded.
3030

3031
  The ldisk parameter, if True, will change the test from the
3032
  is_degraded attribute (which represents overall non-ok status for
3033
  the device(s)) to the ldisk (representing the local storage status).
3034

3035
  """
3036
  lu.cfg.SetDiskID(dev, node)
3037

    
3038
  result = True
3039

    
3040
  if on_primary or dev.AssembleOnSecondary():
3041
    rstats = lu.rpc.call_blockdev_find(node, dev)
3042
    msg = rstats.fail_msg
3043
    if msg:
3044
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
3045
      result = False
3046
    elif not rstats.payload:
3047
      lu.LogWarning("Can't find disk on node %s", node)
3048
      result = False
3049
    else:
3050
      if ldisk:
3051
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
3052
      else:
3053
        result = result and not rstats.payload.is_degraded
3054

    
3055
  if dev.children:
3056
    for child in dev.children:
3057
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
3058

    
3059
  return result
3060
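# Illustrative call of _CheckDiskConsistency (assumes "dev" is one of the
# instance's DRBD disks and "snode" one of its secondary nodes): a caller
# that only cares about the local storage state, not the overall sync
# status, would do something like
#
#   if not _CheckDiskConsistency(self, dev, snode, False, ldisk=True):
#     raise errors.OpExecError("Disk %s on %s is degraded" %
#                              (dev.iv_name, snode))
#
# i.e. with ldisk=True the check is on ldisk_status == LDS_OKAY rather than
# on the overall is_degraded flag.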

    
3061

    
3062
class LUDiagnoseOS(NoHooksLU):
3063
  """Logical unit for OS diagnose/query.
3064

3065
  """
3066
  _OP_PARAMS = [
3067
    _POutputFields,
3068
    ("names", _EmptyList, _TListOf(_TNonEmptyString)),
3069
    ]
3070
  REQ_BGL = False
3071
  _FIELDS_STATIC = utils.FieldSet()
3072
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants",
3073
                                   "parameters", "api_versions")
3074

    
3075
  def CheckArguments(self):
3076
    if self.op.names:
3077
      raise errors.OpPrereqError("Selective OS query not supported",
3078
                                 errors.ECODE_INVAL)
3079

    
3080
    _CheckOutputFields(static=self._FIELDS_STATIC,
3081
                       dynamic=self._FIELDS_DYNAMIC,
3082
                       selected=self.op.output_fields)
3083

    
3084
  def ExpandNames(self):
3085
    # Lock all nodes, in shared mode
3086
    # Temporary removal of locks, should be reverted later
3087
    # TODO: reintroduce locks when they are lighter-weight
3088
    self.needed_locks = {}
3089
    #self.share_locks[locking.LEVEL_NODE] = 1
3090
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3091

    
3092
  @staticmethod
3093
  def _DiagnoseByOS(rlist):
3094
    """Remaps a per-node return list into an a per-os per-node dictionary
3095

3096
    @param rlist: a map with node names as keys and OS objects as values
3097

3098
    @rtype: dict
3099
    @return: a dictionary with osnames as keys and as value another
3100
        map, with nodes as keys and tuples of (path, status, diagnose,
3101
        variants, parameters, api_versions) as values, eg::
3102

3103
          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
3104
                                     (/srv/..., False, "invalid api")],
3105
                           "node2": [(/srv/..., True, "", [], [])]}
3106
          }
3107

3108
    """
3109
    all_os = {}
3110
    # we build here the list of nodes that didn't fail the RPC (at RPC
3111
    # level), so that nodes with a non-responding node daemon don't
3112
    # make all OSes invalid
3113
    good_nodes = [node_name for node_name in rlist
3114
                  if not rlist[node_name].fail_msg]
3115
    for node_name, nr in rlist.items():
3116
      if nr.fail_msg or not nr.payload:
3117
        continue
3118
      for (name, path, status, diagnose, variants,
3119
           params, api_versions) in nr.payload:
3120
        if name not in all_os:
3121
          # build a list of nodes for this os containing empty lists
3122
          # for each node in node_list
3123
          all_os[name] = {}
3124
          for nname in good_nodes:
3125
            all_os[name][nname] = []
3126
        # convert params from [name, help] to (name, help)
3127
        params = [tuple(v) for v in params]
3128
        all_os[name][node_name].append((path, status, diagnose,
3129
                                        variants, params, api_versions))
3130
    return all_os
3131

    
3132
  def Exec(self, feedback_fn):
3133
    """Compute the list of OSes.
3134

3135
    """
3136
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
3137
    node_data = self.rpc.call_os_diagnose(valid_nodes)
3138
    pol = self._DiagnoseByOS(node_data)
3139
    output = []
3140

    
3141
    for os_name, os_data in pol.items():
3142
      row = []
3143
      valid = True
3144
      (variants, params, api_versions) = null_state = (set(), set(), set())
3145
      for idx, osl in enumerate(os_data.values()):
3146
        valid = bool(valid and osl and osl[0][1])
3147
        if not valid:
3148
          (variants, params, api_versions) = null_state
3149
          break
3150
        node_variants, node_params, node_api = osl[0][3:6]
3151
        if idx == 0: # first entry
3152
          variants = set(node_variants)
3153
          params = set(node_params)
3154
          api_versions = set(node_api)
3155
        else: # keep consistency
3156
          variants.intersection_update(node_variants)
3157
          params.intersection_update(node_params)
3158
          api_versions.intersection_update(node_api)
3159

    
3160
      for field in self.op.output_fields:
3161
        if field == "name":
3162
          val = os_name
3163
        elif field == "valid":
3164
          val = valid
3165
        elif field == "node_status":
3166
          # this is just a copy of the dict
3167
          val = {}
3168
          for node_name, nos_list in os_data.items():
3169
            val[node_name] = nos_list
3170
        elif field == "variants":
3171
          val = list(variants)
3172
        elif field == "parameters":
3173
          val = list(params)
3174
        elif field == "api_versions":
3175
          val = list(api_versions)
3176
        else:
3177
          raise errors.ParameterError(field)
3178
        row.append(val)
3179
      output.append(row)
3180

    
3181
    return output
3182
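# Worked example for the consistency loop in LUDiagnoseOS.Exec (node and
# variant names are made up): if node1 reports variants {"lenny", "squeeze"}
# for an OS while node2 reports only {"squeeze"}, the intersection_update
# calls leave {"squeeze"}, so only variants available on every node are
# advertised; parameters and api_versions are reduced the same way, and a
# single invalid or missing OS on any node marks the OS as not valid.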

    
3183

    
3184
class LURemoveNode(LogicalUnit):
3185
  """Logical unit for removing a node.
3186

3187
  """
3188
  HPATH = "node-remove"
3189
  HTYPE = constants.HTYPE_NODE
3190
  _OP_PARAMS = [
3191
    _PNodeName,
3192
    ]
3193

    
3194
  def BuildHooksEnv(self):
3195
    """Build hooks env.
3196

3197
    This doesn't run on the target node in the pre phase as a failed
3198
    node would then be impossible to remove.
3199

3200
    """
3201
    env = {
3202
      "OP_TARGET": self.op.node_name,
3203
      "NODE_NAME": self.op.node_name,
3204
      }
3205
    all_nodes = self.cfg.GetNodeList()
3206
    try:
3207
      all_nodes.remove(self.op.node_name)
3208
    except ValueError:
3209
      logging.warning("Node %s which is about to be removed not found"
3210
                      " in the all nodes list", self.op.node_name)
3211
    return env, all_nodes, all_nodes
3212

    
3213
  def CheckPrereq(self):
3214
    """Check prerequisites.
3215

3216
    This checks:
3217
     - the node exists in the configuration
3218
     - it does not have primary or secondary instances
3219
     - it's not the master
3220

3221
    Any errors are signaled by raising errors.OpPrereqError.
3222

3223
    """
3224
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3225
    node = self.cfg.GetNodeInfo(self.op.node_name)
3226
    assert node is not None
3227

    
3228
    instance_list = self.cfg.GetInstanceList()
3229

    
3230
    masternode = self.cfg.GetMasterNode()
3231
    if node.name == masternode:
3232
      raise errors.OpPrereqError("Node is the master node,"
3233
                                 " you need to failover first.",
3234
                                 errors.ECODE_INVAL)
3235

    
3236
    for instance_name in instance_list:
3237
      instance = self.cfg.GetInstanceInfo(instance_name)
3238
      if node.name in instance.all_nodes:
3239
        raise errors.OpPrereqError("Instance %s is still running on the node,"
3240
                                   " please remove first." % instance_name,
3241
                                   errors.ECODE_INVAL)
3242
    self.op.node_name = node.name
3243
    self.node = node
3244

    
3245
  def Exec(self, feedback_fn):
3246
    """Removes the node from the cluster.
3247

3248
    """
3249
    node = self.node
3250
    logging.info("Stopping the node daemon and removing configs from node %s",
3251
                 node.name)
3252

    
3253
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
3254

    
3255
    # Promote nodes to master candidate as needed
3256
    _AdjustCandidatePool(self, exceptions=[node.name])
3257
    self.context.RemoveNode(node.name)
3258

    
3259
    # Run post hooks on the node before it's removed
3260
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
3261
    try:
3262
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
3263
    except:
3264
      # pylint: disable-msg=W0702
3265
      self.LogWarning("Errors occurred running hooks on %s" % node.name)
3266

    
3267
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
3268
    msg = result.fail_msg
3269
    if msg:
3270
      self.LogWarning("Errors encountered on the remote node while leaving"
3271
                      " the cluster: %s", msg)
3272

    
3273
    # Remove node from our /etc/hosts
3274
    if self.cfg.GetClusterInfo().modify_etc_hosts:
3275
      # FIXME: this should be done via an rpc call to node daemon
3276
      utils.RemoveHostFromEtcHosts(node.name)
3277
      _RedistributeAncillaryFiles(self)
3278
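# Example hooks environment produced by LURemoveNode.BuildHooksEnv for a
# hypothetical node "node3.example.com":
#
#   {"OP_TARGET": "node3.example.com", "NODE_NAME": "node3.example.com"}
#
# The pre and post hooks run on all remaining nodes (the node being removed
# is taken out of the list); in addition, Exec runs the post hook on the
# removed node itself just before telling its daemon to leave the cluster.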

    
3279

    
3280
class LUQueryNodes(NoHooksLU):
3281
  """Logical unit for querying nodes.
3282

3283
  """
3284
  # pylint: disable-msg=W0142
3285
  _OP_PARAMS = [
3286
    _POutputFields,
3287
    ("names", _EmptyList, _TListOf(_TNonEmptyString)),
3288
    ("use_locking", False, _TBool),
3289
    ]
3290
  REQ_BGL = False
3291

    
3292
  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
3293
                    "master_candidate", "offline", "drained"]
3294

    
3295
  _FIELDS_DYNAMIC = utils.FieldSet(
3296
    "dtotal", "dfree",
3297
    "mtotal", "mnode", "mfree",
3298
    "bootid",
3299
    "ctotal", "cnodes", "csockets",
3300
    )
3301

    
3302
  _FIELDS_STATIC = utils.FieldSet(*[
3303
    "pinst_cnt", "sinst_cnt",
3304
    "pinst_list", "sinst_list",
3305
    "pip", "sip", "tags",
3306
    "master",
3307
    "role"] + _SIMPLE_FIELDS
3308
    )
3309

    
3310
  def CheckArguments(self):
3311
    _CheckOutputFields(static=self._FIELDS_STATIC,
3312
                       dynamic=self._FIELDS_DYNAMIC,
3313
                       selected=self.op.output_fields)
3314

    
3315
  def ExpandNames(self):
3316
    self.needed_locks = {}
3317
    self.share_locks[locking.LEVEL_NODE] = 1
3318

    
3319
    if self.op.names:
3320
      self.wanted = _GetWantedNodes(self, self.op.names)
3321
    else:
3322
      self.wanted = locking.ALL_SET
3323

    
3324
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
3325
    self.do_locking = self.do_node_query and self.op.use_locking
3326
    if self.do_locking:
3327
      # if we don't request only static fields, we need to lock the nodes
3328
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
3329

    
3330
  def Exec(self, feedback_fn):
3331
    """Computes the list of nodes and their attributes.
3332

3333
    """
3334
    all_info = self.cfg.GetAllNodesInfo()
3335
    if self.do_locking:
3336
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
3337
    elif self.wanted != locking.ALL_SET:
3338
      nodenames = self.wanted
3339
      missing = set(nodenames).difference(all_info.keys())
3340
      if missing:
3341
        raise errors.OpExecError(
3342
          "Some nodes were removed before retrieving their data: %s" % missing)
3343
    else:
3344
      nodenames = all_info.keys()
3345

    
3346
    nodenames = utils.NiceSort(nodenames)
3347
    nodelist = [all_info[name] for name in nodenames]
3348

    
3349
    # begin data gathering
3350

    
3351
    if self.do_node_query:
3352
      live_data = {}
3353
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
3354
                                          self.cfg.GetHypervisorType())
3355
      for name in nodenames:
3356
        nodeinfo = node_data[name]
3357
        if not nodeinfo.fail_msg and nodeinfo.payload:
3358
          nodeinfo = nodeinfo.payload
3359
          fn = utils.TryConvert
3360
          live_data[name] = {
3361
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
3362
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
3363
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
3364
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
3365
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
3366
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
3367
            "bootid": nodeinfo.get('bootid', None),
3368
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
3369
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
3370
            }
3371
        else:
3372
          live_data[name] = {}
3373
    else:
3374
      live_data = dict.fromkeys(nodenames, {})
3375

    
3376
    node_to_primary = dict([(name, set()) for name in nodenames])
3377
    node_to_secondary = dict([(name, set()) for name in nodenames])
3378

    
3379
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
3380
                             "sinst_cnt", "sinst_list"))
3381
    if inst_fields & frozenset(self.op.output_fields):
3382
      inst_data = self.cfg.GetAllInstancesInfo()
3383

    
3384
      for inst in inst_data.values():
3385
        if inst.primary_node in node_to_primary:
3386
          node_to_primary[inst.primary_node].add(inst.name)
3387
        for secnode in inst.secondary_nodes:
3388
          if secnode in node_to_secondary:
3389
            node_to_secondary[secnode].add(inst.name)
3390

    
3391
    master_node = self.cfg.GetMasterNode()
3392

    
3393
    # end data gathering
3394

    
3395
    output = []
3396
    for node in nodelist:
3397
      node_output = []
3398
      for field in self.op.output_fields:
3399
        if field in self._SIMPLE_FIELDS:
3400
          val = getattr(node, field)
3401
        elif field == "pinst_list":
3402
          val = list(node_to_primary[node.name])
3403
        elif field == "sinst_list":
3404
          val = list(node_to_secondary[node.name])
3405
        elif field == "pinst_cnt":
3406
          val = len(node_to_primary[node.name])
3407
        elif field == "sinst_cnt":
3408
          val = len(node_to_secondary[node.name])
3409
        elif field == "pip":
3410
          val = node.primary_ip
3411
        elif field == "sip":
3412
          val = node.secondary_ip
3413
        elif field == "tags":
3414
          val = list(node.GetTags())
3415
        elif field == "master":
3416
          val = node.name == master_node
3417
        elif self._FIELDS_DYNAMIC.Matches(field):
3418
          val = live_data[node.name].get(field, None)
3419
        elif field == "role":
3420
          if node.name == master_node:
3421
            val = "M"
3422
          elif node.master_candidate:
3423
            val = "C"
3424
          elif node.drained:
3425
            val = "D"
3426
          elif node.offline:
3427
            val = "O"
3428
          else:
3429
            val = "R"
3430
        else:
3431
          raise errors.ParameterError(field)
3432
        node_output.append(val)
3433
      output.append(node_output)
3434

    
3435
    return output
3436
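# Shape of the data returned by LUQueryNodes.Exec: one row per node, one
# entry per requested field, in request order.  For example (node names and
# values are hypothetical), output_fields ["name", "pinst_cnt", "role"]
# could yield
#
#   [["node1.example.com", 2, "M"],
#    ["node2.example.com", 1, "C"]]
#
# where the role column uses the single-letter codes computed above
# (M/C/D/O/R).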

    
3437

    
3438
class LUQueryNodeVolumes(NoHooksLU):
3439
  """Logical unit for getting volumes on node(s).
3440

3441
  """
3442
  _OP_PARAMS = [
3443
    ("nodes", _EmptyList, _TListOf(_TNonEmptyString)),
3444
    ("output_fields", _NoDefault, _TListOf(_TNonEmptyString)),
3445
    ]
3446
  REQ_BGL = False
3447
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
3448
  _FIELDS_STATIC = utils.FieldSet("node")
3449

    
3450
  def CheckArguments(self):
3451
    _CheckOutputFields(static=self._FIELDS_STATIC,
3452
                       dynamic=self._FIELDS_DYNAMIC,
3453
                       selected=self.op.output_fields)
3454

    
3455
  def ExpandNames(self):
3456
    self.needed_locks = {}
3457
    self.share_locks[locking.LEVEL_NODE] = 1
3458
    if not self.op.nodes:
3459
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3460
    else:
3461
      self.needed_locks[locking.LEVEL_NODE] = \
3462
        _GetWantedNodes(self, self.op.nodes)
3463

    
3464
  def Exec(self, feedback_fn):
3465
    """Computes the list of nodes and their attributes.
3466

3467
    """
3468
    nodenames = self.acquired_locks[locking.LEVEL_NODE]
3469
    volumes = self.rpc.call_node_volumes(nodenames)
3470

    
3471
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
3472
             in self.cfg.GetInstanceList()]
3473

    
3474
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
3475

    
3476
    output = []
3477
    for node in nodenames:
3478
      nresult = volumes[node]
3479
      if nresult.offline:
3480
        continue
3481
      msg = nresult.fail_msg
3482
      if msg:
3483
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
3484
        continue
3485

    
3486
      node_vols = nresult.payload[:]
3487
      node_vols.sort(key=lambda vol: vol['dev'])
3488

    
3489
      for vol in node_vols:
3490
        node_output = []
3491
        for field in self.op.output_fields:
3492
          if field == "node":
3493
            val = node
3494
          elif field == "phys":
3495
            val = vol['dev']
3496
          elif field == "vg":
3497
            val = vol['vg']
3498
          elif field == "name":
3499
            val = vol['name']
3500
          elif field == "size":
3501
            val = int(float(vol['size']))
3502
          elif field == "instance":
3503
            for inst in ilist:
3504
              if node not in lv_by_node[inst]:
3505
                continue
3506
              if vol['name'] in lv_by_node[inst][node]:
3507
                val = inst.name
3508
                break
3509
            else:
3510
              val = '-'
3511
          else:
3512
            raise errors.ParameterError(field)
3513
          node_output.append(str(val))
3514

    
3515
        output.append(node_output)
3516

    
3517
    return output
3518
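# Based on the keys accessed above, each volume in nresult.payload is a
# dict looking roughly like (values are illustrative)
#
#   {"dev": "/dev/xenvg/disk0", "vg": "xenvg", "name": "disk0",
#    "size": 10240.0}
#
# so a query with output_fields ["node", "phys", "size"] produces string
# rows such as ["node1.example.com", "/dev/xenvg/disk0", "10240"].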

    
3519

    
3520
class LUQueryNodeStorage(NoHooksLU):
3521
  """Logical unit for getting information on storage units on node(s).
3522

3523
  """
3524
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3525
  _OP_PARAMS = [
3526
    ("nodes", _EmptyList, _TListOf(_TNonEmptyString)),
3527
    ("storage_type", _NoDefault, _CheckStorageType),
3528
    ("output_fields", _NoDefault, _TListOf(_TNonEmptyString)),
3529
    ("name", None, _TMaybeString),
3530
    ]
3531
  REQ_BGL = False
3532

    
3533
  def CheckArguments(self):
3534
    _CheckOutputFields(static=self._FIELDS_STATIC,
3535
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3536
                       selected=self.op.output_fields)
3537

    
3538
  def ExpandNames(self):
3539
    self.needed_locks = {}
3540
    self.share_locks[locking.LEVEL_NODE] = 1
3541

    
3542
    if self.op.nodes:
3543
      self.needed_locks[locking.LEVEL_NODE] = \
3544
        _GetWantedNodes(self, self.op.nodes)
3545
    else:
3546
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3547

    
3548
  def Exec(self, feedback_fn):
3549
    """Computes the list of nodes and their attributes.
3550

3551
    """
3552
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3553

    
3554
    # Always get name to sort by
3555
    if constants.SF_NAME in self.op.output_fields:
3556
      fields = self.op.output_fields[:]
3557
    else:
3558
      fields = [constants.SF_NAME] + self.op.output_fields
3559

    
3560
    # Never ask for node or type as it's only known to the LU
3561
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
3562
      while extra in fields:
3563
        fields.remove(extra)
3564

    
3565
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3566
    name_idx = field_idx[constants.SF_NAME]
3567

    
3568
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3569
    data = self.rpc.call_storage_list(self.nodes,
3570
                                      self.op.storage_type, st_args,
3571
                                      self.op.name, fields)
3572

    
3573
    result = []
3574

    
3575
    for node in utils.NiceSort(self.nodes):
3576
      nresult = data[node]
3577
      if nresult.offline:
3578
        continue
3579

    
3580
      msg = nresult.fail_msg
3581
      if msg:
3582
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
3583
        continue
3584

    
3585
      rows = dict([(row[name_idx], row) for row in nresult.payload])
3586

    
3587
      for name in utils.NiceSort(rows.keys()):
3588
        row = rows[name]
3589

    
3590
        out = []
3591

    
3592
        for field in self.op.output_fields:
3593
          if field == constants.SF_NODE:
3594
            val = node
3595
          elif field == constants.SF_TYPE:
3596
            val = self.op.storage_type
3597
          elif field in field_idx:
3598
            val = row[field_idx[field]]
3599
          else:
3600
            raise errors.ParameterError(field)
3601

    
3602
          out.append(val)
3603

    
3604
        result.append(out)
3605

    
3606
    return result
3607

    
3608

    
3609
class LUModifyNodeStorage(NoHooksLU):
3610
  """Logical unit for modifying a storage volume on a node.
3611

3612
  """
3613
  _OP_PARAMS = [
3614
    _PNodeName,
3615
    ("storage_type", _NoDefault, _CheckStorageType),
3616
    ("name", _NoDefault, _TNonEmptyString),
3617
    ("changes", _NoDefault, _TDict),
3618
    ]
3619
  REQ_BGL = False
3620

    
3621
  def CheckArguments(self):
3622
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3623

    
3624
    storage_type = self.op.storage_type
3625

    
3626
    try:
3627
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
3628
    except KeyError:
3629
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
3630
                                 " modified" % storage_type,
3631
                                 errors.ECODE_INVAL)
3632

    
3633
    diff = set(self.op.changes.keys()) - modifiable
3634
    if diff:
3635
      raise errors.OpPrereqError("The following fields can not be modified for"
3636
                                 " storage units of type '%s': %r" %
3637
                                 (storage_type, list(diff)),
3638
                                 errors.ECODE_INVAL)
3639

    
3640
  def ExpandNames(self):
3641
    self.needed_locks = {
3642
      locking.LEVEL_NODE: self.op.node_name,
3643
      }
3644

    
3645
  def Exec(self, feedback_fn):
3646
    """Computes the list of nodes and their attributes.
3647

3648
    """
3649
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3650
    result = self.rpc.call_storage_modify(self.op.node_name,
3651
                                          self.op.storage_type, st_args,
3652
                                          self.op.name, self.op.changes)
3653
    result.Raise("Failed to modify storage unit '%s' on %s" %
3654
                 (self.op.name, self.op.node_name))
3655
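# Sketch of the validation done in LUModifyNodeStorage.CheckArguments (the
# storage type and fields below are an assumption for illustration):
# supposing LVM PV storage only allows changing its allocatable flag, then
#
#   changes = {"allocatable": False}
#
# passes the check, while changes = {"size": 1024} leaves diff = {"size"}
# non-empty and raises OpPrereqError before any RPC is made.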

    
3656

    
3657
class LUAddNode(LogicalUnit):
3658
  """Logical unit for adding node to the cluster.
3659

3660
  """
3661
  HPATH = "node-add"
3662
  HTYPE = constants.HTYPE_NODE
3663
  _OP_PARAMS = [
3664
    _PNodeName,
3665
    ("primary_ip", None, _NoType),
3666
    ("secondary_ip", None, _TMaybeString),
3667
    ("readd", False, _TBool),
3668
    ]
3669

    
3670
  def CheckArguments(self):
3671
    # validate/normalize the node name
3672
    self.op.node_name = netutils.HostInfo.NormalizeName(self.op.node_name)
3673

    
3674
  def BuildHooksEnv(self):
3675
    """Build hooks env.
3676

3677
    This will run on all nodes before, and on all nodes + the new node after.
3678

3679
    """
3680
    env = {
3681
      "OP_TARGET": self.op.node_name,
3682
      "NODE_NAME": self.op.node_name,
3683
      "NODE_PIP": self.op.primary_ip,
3684
      "NODE_SIP": self.op.secondary_ip,
3685
      }
3686
    nodes_0 = self.cfg.GetNodeList()
3687
    nodes_1 = nodes_0 + [self.op.node_name, ]
3688
    return env, nodes_0, nodes_1
3689

    
3690
  def CheckPrereq(self):
3691
    """Check prerequisites.
3692

3693
    This checks:
3694
     - the new node is not already in the config
3695
     - it is resolvable
3696
     - its parameters (single/dual homed) matches the cluster
3697

3698
    Any errors are signaled by raising errors.OpPrereqError.
3699

3700
    """
3701
    node_name = self.op.node_name
3702
    cfg = self.cfg
3703

    
3704
    dns_data = netutils.GetHostInfo(node_name)
3705

    
3706
    node = dns_data.name
3707
    primary_ip = self.op.primary_ip = dns_data.ip
3708
    if self.op.secondary_ip is None:
3709
      self.op.secondary_ip = primary_ip
3710
    if not netutils.IsValidIP4(self.op.secondary_ip):
3711
      raise errors.OpPrereqError("Invalid secondary IP given",
3712
                                 errors.ECODE_INVAL)
3713
    secondary_ip = self.op.secondary_ip
3714

    
3715
    node_list = cfg.GetNodeList()
3716
    if not self.op.readd and node in node_list:
3717
      raise errors.OpPrereqError("Node %s is already in the configuration" %
3718
                                 node, errors.ECODE_EXISTS)
3719
    elif self.op.readd and node not in node_list:
3720
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
3721
                                 errors.ECODE_NOENT)
3722

    
3723
    self.changed_primary_ip = False
3724

    
3725
    for existing_node_name in node_list:
3726
      existing_node = cfg.GetNodeInfo(existing_node_name)
3727

    
3728
      if self.op.readd and node == existing_node_name:
3729
        if existing_node.secondary_ip != secondary_ip:
3730
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
3731
                                     " address configuration as before",
3732
                                     errors.ECODE_INVAL)
3733
        if existing_node.primary_ip != primary_ip:
3734
          self.changed_primary_ip = True
3735

    
3736
        continue
3737

    
3738
      if (existing_node.primary_ip == primary_ip or
3739
          existing_node.secondary_ip == primary_ip or
3740
          existing_node.primary_ip == secondary_ip or
3741
          existing_node.secondary_ip == secondary_ip):
3742
        raise errors.OpPrereqError("New node ip address(es) conflict with"
3743
                                   " existing node %s" % existing_node.name,
3744
                                   errors.ECODE_NOTUNIQUE)
3745

    
3746
    # check that the type of the node (single versus dual homed) is the
3747
    # same as for the master
3748
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
3749
    master_singlehomed = myself.secondary_ip == myself.primary_ip
3750
    newbie_singlehomed = secondary_ip == primary_ip
3751
    if master_singlehomed != newbie_singlehomed:
3752
      if master_singlehomed:
3753
        raise errors.OpPrereqError("The master has no private ip but the"
3754
                                   " new node has one",
3755
                                   errors.ECODE_INVAL)
3756
      else:
3757
        raise errors.OpPrereqError("The master has a private ip but the"
3758
                                   " new node doesn't have one",
3759
                                   errors.ECODE_INVAL)
3760

    
3761
    # checks reachability
3762
    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
3763
      raise errors.OpPrereqError("Node not reachable by ping",
3764
                                 errors.ECODE_ENVIRON)
3765

    
3766
    if not newbie_singlehomed:
3767
      # check reachability from my secondary ip to newbie's secondary ip
3768
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
3769
                           source=myself.secondary_ip):
3770
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
3771
                                   " based ping to noded port",
3772
                                   errors.ECODE_ENVIRON)
3773

    
3774
    if self.op.readd:
3775
      exceptions = [node]
3776
    else:
3777
      exceptions = []
3778

    
3779
    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
3780

    
3781
    if self.op.readd:
3782
      self.new_node = self.cfg.GetNodeInfo(node)
3783
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
3784
    else:
3785
      self.new_node = objects.Node(name=node,
3786
                                   primary_ip=primary_ip,
3787
                                   secondary_ip=secondary_ip,
3788
                                   master_candidate=self.master_candidate,
3789
                                   offline=False, drained=False)
3790

    
3791
  def Exec(self, feedback_fn):
3792
    """Adds the new node to the cluster.
3793

3794
    """
3795
    new_node = self.new_node
3796
    node = new_node.name
3797

    
3798
    # for re-adds, reset the offline/drained/master-candidate flags;
3799
    # we need to reset here, otherwise offline would prevent RPC calls
3800
    # later in the procedure; this also means that if the re-add
3801
    # fails, we are left with a non-offlined, broken node
3802
    if self.op.readd:
3803
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
3804
      self.LogInfo("Readding a node, the offline/drained flags were reset")
3805
      # if we demote the node, we do cleanup later in the procedure
3806
      new_node.master_candidate = self.master_candidate
3807
      if self.changed_primary_ip:
3808
        new_node.primary_ip = self.op.primary_ip
3809

    
3810
    # notify the user about any possible mc promotion
3811
    if new_node.master_candidate:
3812
      self.LogInfo("Node will be a master candidate")
3813

    
3814
    # check connectivity
3815
    result = self.rpc.call_version([node])[node]
3816
    result.Raise("Can't get version information from node %s" % node)
3817
    if constants.PROTOCOL_VERSION == result.payload:
3818
      logging.info("Communication to node %s fine, sw version %s match",
3819
                   node, result.payload)
3820
    else:
3821
      raise errors.OpExecError("Version mismatch master version %s,"
3822
                               " node version %s" %
3823
                               (constants.PROTOCOL_VERSION, result.payload))
3824

    
3825
    # setup ssh on node
3826
    if self.cfg.GetClusterInfo().modify_ssh_setup:
3827
      logging.info("Copy ssh key to node %s", node)
3828
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
3829
      keyarray = []
3830
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
3831
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
3832
                  priv_key, pub_key]
3833

    
3834
      for i in keyfiles:
3835
        keyarray.append(utils.ReadFile(i))
3836

    
3837
      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
3838
                                      keyarray[2], keyarray[3], keyarray[4],
3839
                                      keyarray[5])
3840
      result.Raise("Cannot transfer ssh keys to the new node")
3841

    
3842
    # Add node to our /etc/hosts, and add key to known_hosts
3843
    if self.cfg.GetClusterInfo().modify_etc_hosts:
3844
      # FIXME: this should be done via an rpc call to node daemon
3845
      utils.AddHostToEtcHosts(new_node.name)
3846

    
3847
    if new_node.secondary_ip != new_node.primary_ip:
3848
      result = self.rpc.call_node_has_ip_address(new_node.name,
3849
                                                 new_node.secondary_ip)
3850
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
3851
                   prereq=True, ecode=errors.ECODE_ENVIRON)
3852
      if not result.payload:
3853
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
3854
                                 " you gave (%s). Please fix and re-run this"
3855
                                 " command." % new_node.secondary_ip)
3856

    
3857
    node_verify_list = [self.cfg.GetMasterNode()]
3858
    node_verify_param = {
3859
      constants.NV_NODELIST: [node],
3860
      # TODO: do a node-net-test as well?
3861
    }
3862

    
3863
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
3864
                                       self.cfg.GetClusterName())
3865
    for verifier in node_verify_list:
3866
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
3867
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
3868
      if nl_payload:
3869
        for failed in nl_payload:
3870
          feedback_fn("ssh/hostname verification failed"
3871
                      " (checking from %s): %s" %
3872
                      (verifier, nl_payload[failed]))
3873
        raise errors.OpExecError("ssh/hostname verification failed.")
3874

    
3875
    if self.op.readd:
3876
      _RedistributeAncillaryFiles(self)
3877
      self.context.ReaddNode(new_node)
3878
      # make sure we redistribute the config
3879
      self.cfg.Update(new_node, feedback_fn)
3880
      # and make sure the new node will not have old files around
3881
      if not new_node.master_candidate:
3882
        result = self.rpc.call_node_demote_from_mc(new_node.name)
3883
        msg = result.fail_msg
3884
        if msg:
3885
          self.LogWarning("Node failed to demote itself from master"
3886
                          " candidate status: %s" % msg)
3887
    else:
3888
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
3889
      self.context.AddNode(new_node, self.proc.GetECId())
3890
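# Worked example for the single/dual-homed check in LUAddNode.CheckPrereq
# (addresses are illustrative): if the master has primary_ip == secondary_ip
# == 192.0.2.10 (single-homed) while the new node is given primary_ip
# 192.0.2.11 and secondary_ip 198.51.100.11, then master_singlehomed is
# True, newbie_singlehomed is False and the add is refused: every node of a
# cluster must use the same single- or dual-homed network layout.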

    
3891

    
3892
class LUSetNodeParams(LogicalUnit):
3893
  """Modifies the parameters of a node.
3894

3895
  """
3896
  HPATH = "node-modify"
3897
  HTYPE = constants.HTYPE_NODE
3898
  _OP_PARAMS = [
3899
    _PNodeName,
3900
    ("master_candidate", None, _TMaybeBool),
3901
    ("offline", None, _TMaybeBool),
3902
    ("drained", None, _TMaybeBool),
3903
    ("auto_promote", False, _TBool),
3904
    _PForce,
3905
    ]
3906
  REQ_BGL = False
3907

    
3908
  def CheckArguments(self):
3909
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3910
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
3911
    if all_mods.count(None) == 3:
3912
      raise errors.OpPrereqError("Please pass at least one modification",
3913
                                 errors.ECODE_INVAL)
3914
    if all_mods.count(True) > 1:
3915
      raise errors.OpPrereqError("Can't set the node into more than one"
3916
                                 " state at the same time",
3917
                                 errors.ECODE_INVAL)
3918

    
3919
    # Boolean value that tells us whether we're offlining or draining the node
3920
    self.offline_or_drain = (self.op.offline == True or
3921
                             self.op.drained == True)
3922
    self.deoffline_or_drain = (self.op.offline == False or
3923
                               self.op.drained == False)
3924
    self.might_demote = (self.op.master_candidate == False or
3925
                         self.offline_or_drain)
3926

    
3927
    self.lock_all = self.op.auto_promote and self.might_demote
3928

    
3929

    
3930
  def ExpandNames(self):
3931
    if self.lock_all:
3932
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
3933
    else:
3934
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
3935

    
3936
  def BuildHooksEnv(self):
3937
    """Build hooks env.
3938

3939
    This runs on the master node.
3940

3941
    """
3942
    env = {
3943
      "OP_TARGET": self.op.node_name,
3944
      "MASTER_CANDIDATE": str(self.op.master_candidate),
3945
      "OFFLINE": str(self.op.offline),
3946
      "DRAINED": str(self.op.drained),
3947
      }
3948
    nl = [self.cfg.GetMasterNode(),
3949
          self.op.node_name]
3950
    return env, nl, nl
3951

    
3952
  def CheckPrereq(self):
3953
    """Check prerequisites.
3954

3955
    This only checks the instance list against the existing names.
3956

3957
    """
3958
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
3959

    
3960
    if (self.op.master_candidate is not None or
3961
        self.op.drained is not None or
3962
        self.op.offline is not None):
3963
      # we can't change the master's node flags
3964
      if self.op.node_name == self.cfg.GetMasterNode():
3965
        raise errors.OpPrereqError("The master role can be changed"
3966
                                   " only via masterfailover",
3967
                                   errors.ECODE_INVAL)
3968

    
3969

    
3970
    if node.master_candidate and self.might_demote and not self.lock_all:
3971
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
3972
      # check if after removing the current node, we're missing master
3973
      # candidates
3974
      (mc_remaining, mc_should, _) = \
3975
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
3976
      if mc_remaining < mc_should:
3977
        raise errors.OpPrereqError("Not enough master candidates, please"
3978
                                   " pass auto_promote to allow promotion",
3979
                                   errors.ECODE_INVAL)
3980

    
3981
    if (self.op.master_candidate == True and
3982
        ((node.offline and not self.op.offline == False) or
3983
         (node.drained and not self.op.drained == False))):
3984
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
3985
                                 " to master_candidate" % node.name,
3986
                                 errors.ECODE_INVAL)
3987

    
3988
    # If we're being deofflined/drained, we'll MC ourself if needed
3989
    if (self.deoffline_or_drain and not self.offline_or_drain and not
3990
        self.op.master_candidate == True and not node.master_candidate):
3991
      self.op.master_candidate = _DecideSelfPromotion(self)
3992
      if self.op.master_candidate:
3993
        self.LogInfo("Autopromoting node to master candidate")
3994

    
3995
    return
3996

    
3997
  def Exec(self, feedback_fn):
3998
    """Modifies a node.
3999

4000
    """
4001
    node = self.node
4002

    
4003
    result = []
4004
    changed_mc = False
4005

    
4006
    if self.op.offline is not None:
4007
      node.offline = self.op.offline
4008
      result.append(("offline", str(self.op.offline)))
4009
      if self.op.offline == True:
4010
        if node.master_candidate:
4011
          node.master_candidate = False
4012
          changed_mc = True
4013
          result.append(("master_candidate", "auto-demotion due to offline"))
4014
        if node.drained:
4015
          node.drained = False
4016
          result.append(("drained", "clear drained status due to offline"))
4017

    
4018
    if self.op.master_candidate is not None:
4019
      node.master_candidate = self.op.master_candidate
4020
      changed_mc = True
4021
      result.append(("master_candidate", str(self.op.master_candidate)))
4022
      if self.op.master_candidate == False:
4023
        rrc = self.rpc.call_node_demote_from_mc(node.name)
4024
        msg = rrc.fail_msg
4025
        if msg:
4026
          self.LogWarning("Node failed to demote itself: %s" % msg)
4027

    
4028
    if self.op.drained is not None:
4029
      node.drained = self.op.drained
4030
      result.append(("drained", str(self.op.drained)))
4031
      if self.op.drained == True:
4032
        if node.master_candidate:
4033
          node.master_candidate = False
4034
          changed_mc = True
4035
          result.append(("master_candidate", "auto-demotion due to drain"))
4036
          rrc = self.rpc.call_node_demote_from_mc(node.name)
4037
          msg = rrc.fail_msg
4038
          if msg:
4039
            self.LogWarning("Node failed to demote itself: %s" % msg)
4040
        if node.offline:
4041
          node.offline = False
4042
          result.append(("offline", "clear offline status due to drain"))
4043

    
4044
    # we locked all nodes, we adjust the CP before updating this node
4045
    if self.lock_all:
4046
      _AdjustCandidatePool(self, [node.name])
4047

    
4048
    # this will trigger configuration file update, if needed
4049
    self.cfg.Update(node, feedback_fn)
4050

    
4051
    # this will trigger job queue propagation or cleanup
4052
    if changed_mc:
4053
      self.context.ReaddNode(node)
4054

    
4055
    return result
4056
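# Example of the change list returned by LUSetNodeParams.Exec: offlining a
# node that is currently a master candidate produces
#
#   [("offline", "True"),
#    ("master_candidate", "auto-demotion due to offline")]
#
# i.e. each entry names the attribute that was touched and a value or
# human-readable reason, which the caller can present as feedback.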

    
4057

    
4058
class LUPowercycleNode(NoHooksLU):
4059
  """Powercycles a node.
4060

4061
  """
4062
  _OP_PARAMS = [
4063
    _PNodeName,
4064
    _PForce,
4065
    ]
4066
  REQ_BGL = False
4067

    
4068
  def CheckArguments(self):
4069
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4070
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
4071
      raise errors.OpPrereqError("The node is the master and the force"
4072
                                 " parameter was not set",
4073
                                 errors.ECODE_INVAL)
4074

    
4075
  def ExpandNames(self):
4076
    """Locking for PowercycleNode.
4077

4078
    This is a last-resort option and shouldn't block on other
4079
    jobs. Therefore, we grab no locks.
4080

4081
    """
4082
    self.needed_locks = {}
4083

    
4084
  def Exec(self, feedback_fn):
4085
    """Reboots a node.
4086

4087
    """
4088
    result = self.rpc.call_node_powercycle(self.op.node_name,
4089
                                           self.cfg.GetHypervisorType())
4090
    result.Raise("Failed to schedule the reboot")
4091
    return result.payload
4092

    
4093

    
4094
class LUQueryClusterInfo(NoHooksLU):
4095
  """Query cluster configuration.
4096

4097
  """
4098
  REQ_BGL = False
4099

    
4100
  def ExpandNames(self):
4101
    self.needed_locks = {}
4102

    
4103
  def Exec(self, feedback_fn):
4104
    """Return cluster config.
4105

4106
    """
4107
    cluster = self.cfg.GetClusterInfo()
4108
    os_hvp = {}
4109

    
4110
    # Filter just for enabled hypervisors
4111
    for os_name, hv_dict in cluster.os_hvp.items():
4112
      os_hvp[os_name] = {}
4113
      for hv_name, hv_params in hv_dict.items():
4114
        if hv_name in cluster.enabled_hypervisors:
4115
          os_hvp[os_name][hv_name] = hv_params
4116

    
4117
    result = {
4118
      "software_version": constants.RELEASE_VERSION,
4119
      "protocol_version": constants.PROTOCOL_VERSION,
4120
      "config_version": constants.CONFIG_VERSION,
4121
      "os_api_version": max(constants.OS_API_VERSIONS),
4122
      "export_version": constants.EXPORT_VERSION,
4123
      "architecture": (platform.architecture()[0], platform.machine()),
4124
      "name": cluster.cluster_name,
4125
      "master": cluster.master_node,
4126
      "default_hypervisor": cluster.enabled_hypervisors[0],
4127
      "enabled_hypervisors": cluster.enabled_hypervisors,
4128
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
4129
                        for hypervisor_name in cluster.enabled_hypervisors]),
4130
      "os_hvp": os_hvp,
4131
      "beparams": cluster.beparams,
4132
      "osparams": cluster.osparams,
4133
      "nicparams": cluster.nicparams,
4134
      "candidate_pool_size": cluster.candidate_pool_size,
4135
      "master_netdev": cluster.master_netdev,
4136
      "volume_group_name": cluster.volume_group_name,
4137
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
4138
      "file_storage_dir": cluster.file_storage_dir,
4139
      "maintain_node_health": cluster.maintain_node_health,
4140
      "ctime": cluster.ctime,
4141
      "mtime": cluster.mtime,
4142
      "uuid": cluster.uuid,
4143
      "tags": list(cluster.GetTags()),
4144
      "uid_pool": cluster.uid_pool,
4145
      "default_iallocator": cluster.default_iallocator,
4146
      "reserved_lvs": cluster.reserved_lvs,
4147
      }
4148

    
4149
    return result
4150
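# Worked example for the os_hvp filtering in LUQueryClusterInfo.Exec
# (the OS name is hypothetical): with
#
#   cluster.os_hvp = {"debian-image": {"xen-pvm": {...}, "kvm": {...}}}
#
# and enabled_hypervisors = ["xen-pvm"], the reported os_hvp contains only
# the "xen-pvm" entry, so clients never see per-OS overrides for
# hypervisors that are not enabled on the cluster.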

    
4151

    
4152
class LUQueryConfigValues(NoHooksLU):
4153
  """Return configuration values.
4154

4155
  """
4156
  _OP_PARAMS = [_POutputFields]
4157
  REQ_BGL = False
4158
  _FIELDS_DYNAMIC = utils.FieldSet()
4159
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
4160
                                  "watcher_pause")
4161

    
4162
  def CheckArguments(self):
4163
    _CheckOutputFields(static=self._FIELDS_STATIC,
4164
                       dynamic=self._FIELDS_DYNAMIC,
4165
                       selected=self.op.output_fields)
4166

    
4167
  def ExpandNames(self):
4168
    self.needed_locks = {}
4169

    
4170
  def Exec(self, feedback_fn):
4171
    """Dump a representation of the cluster config to the standard output.
4172

4173
    """
4174
    values = []
4175
    for field in self.op.output_fields:
4176
      if field == "cluster_name":
4177
        entry = self.cfg.GetClusterName()
4178
      elif field == "master_node":
4179
        entry = self.cfg.GetMasterNode()
4180
      elif field == "drain_flag":
4181
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
4182
      elif field == "watcher_pause":
4183
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
4184
      else:
4185
        raise errors.ParameterError(field)
4186
      values.append(entry)
4187
    return values
4188

    
4189

    
4190
class LUActivateInstanceDisks(NoHooksLU):
4191
  """Bring up an instance's disks.
4192

4193
  """
4194
  _OP_PARAMS = [
4195
    _PInstanceName,
4196
    ("ignore_size", False, _TBool),
4197
    ]
4198
  REQ_BGL = False
4199

    
4200
  def ExpandNames(self):
4201
    self._ExpandAndLockInstance()
4202
    self.needed_locks[locking.LEVEL_NODE] = []
4203
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4204

    
4205
  def DeclareLocks(self, level):
4206
    if level == locking.LEVEL_NODE:
4207
      self._LockInstancesNodes()
4208

    
4209
  def CheckPrereq(self):
4210
    """Check prerequisites.
4211

4212
    This checks that the instance is in the cluster.
4213

4214
    """
4215
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4216
    assert self.instance is not None, \
4217
      "Cannot retrieve locked instance %s" % self.op.instance_name
4218
    _CheckNodeOnline(self, self.instance.primary_node)
4219

    
4220
  def Exec(self, feedback_fn):
4221
    """Activate the disks.
4222

4223
    """
4224
    disks_ok, disks_info = \
4225
              _AssembleInstanceDisks(self, self.instance,
4226
                                     ignore_size=self.op.ignore_size)
4227
    if not disks_ok:
4228
      raise errors.OpExecError("Cannot activate block devices")
4229

    
4230
    return disks_info
4231

    
4232

    
4233
def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
4234
                           ignore_size=False):
4235
  """Prepare the block devices for an instance.
4236

4237
  This sets up the block devices on all nodes.
4238

4239
  @type lu: L{LogicalUnit}
4240
  @param lu: the logical unit on whose behalf we execute
4241
  @type instance: L{objects.Instance}
4242
  @param instance: the instance for whose disks we assemble
4243
  @type disks: list of L{objects.Disk} or None
4244
  @param disks: which disks to assemble (or all, if None)
4245
  @type ignore_secondaries: boolean
4246
  @param ignore_secondaries: if true, errors on secondary nodes
4247
      won't result in an error return from the function
4248
  @type ignore_size: boolean
4249
  @param ignore_size: if true, the current known size of the disk
4250
      will not be used during the disk activation, useful for cases
4251
      when the size is wrong
4252
  @return: False if the operation failed, otherwise a list of
4253
      (host, instance_visible_name, node_visible_name)
4254
      with the mapping from node devices to instance devices
4255

4256
  """
4257
  device_info = []
4258
  disks_ok = True
4259
  iname = instance.name
4260
  disks = _ExpandCheckDisks(instance, disks)
4261

    
4262
  # With the two passes mechanism we try to reduce the window of
4263
  # opportunity for the race condition of switching DRBD to primary
4264
  # before handshaking occurred, but we do not eliminate it
4265

    
4266
  # The proper fix would be to wait (with some limits) until the
4267
  # connection has been made and drbd transitions from WFConnection
4268
  # into any other network-connected state (Connected, SyncTarget,
4269
  # SyncSource, etc.)
4270

    
4271
  # 1st pass, assemble on all nodes in secondary mode
4272
  for inst_disk in disks:
4273
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4274
      if ignore_size:
4275
        node_disk = node_disk.Copy()
4276
        node_disk.UnsetSize()
4277
      lu.cfg.SetDiskID(node_disk, node)
4278
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
4279
      msg = result.fail_msg
4280
      if msg:
4281
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
4282
                           " (is_primary=False, pass=1): %s",
4283
                           inst_disk.iv_name, node, msg)
4284
        if not ignore_secondaries:
4285
          disks_ok = False
4286

    
4287
  # FIXME: race condition on drbd migration to primary
4288

    
4289
  # 2nd pass, do only the primary node
4290
  for inst_disk in disks:
4291
    dev_path = None
4292

    
4293
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4294
      if node != instance.primary_node:
4295
        continue
4296
      if ignore_size:
4297
        node_disk = node_disk.Copy()
4298
        node_disk.UnsetSize()
4299
      lu.cfg.SetDiskID(node_disk, node)
4300
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
4301
      msg = result.fail_msg
4302
      if msg:
4303
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
4304
                           " (is_primary=True, pass=2): %s",
4305
                           inst_disk.iv_name, node, msg)
4306
        disks_ok = False
4307
      else:
4308
        dev_path = result.payload
4309

    
4310
    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
4311

    
4312
  # leave the disks configured for the primary node
4313
  # this is a workaround that would be fixed better by
4314
  # improving the logical/physical id handling
4315
  for disk in disks:
4316
    lu.cfg.SetDiskID(disk, instance.primary_node)
4317

    
4318
  return disks_ok, device_info
4319

    
4320

    
4321
def _StartInstanceDisks(lu, instance, force):
4322
  """Start the disks of an instance.
4323

4324
  """
4325
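  # Note: when force is None (as LUReinstallInstance and LURenameInstance pass
  # it), the "--force" hint below is suppressed; it is only shown when force
  # is explicitly False.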
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
4326
                                           ignore_secondaries=force)
4327
  if not disks_ok:
4328
    _ShutdownInstanceDisks(lu, instance)
4329
    if force is not None and not force:
4330
      lu.proc.LogWarning("", hint="If the message above refers to a"
4331
                         " secondary node,"
4332
                         " you can retry the operation using '--force'.")
4333
    raise errors.OpExecError("Disk consistency error")
4334

    
4335

    
4336
class LUDeactivateInstanceDisks(NoHooksLU):
4337
  """Shutdown an instance's disks.
4338

4339
  """
4340
  _OP_PARAMS = [
4341
    _PInstanceName,
4342
    ]
4343
  REQ_BGL = False
4344

    
4345
  def ExpandNames(self):
4346
    self._ExpandAndLockInstance()
4347
    self.needed_locks[locking.LEVEL_NODE] = []
4348
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4349

    
4350
  def DeclareLocks(self, level):
4351
    if level == locking.LEVEL_NODE:
4352
      self._LockInstancesNodes()
4353

    
4354
  def CheckPrereq(self):
4355
    """Check prerequisites.
4356

4357
    This checks that the instance is in the cluster.
4358

4359
    """
4360
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4361
    assert self.instance is not None, \
4362
      "Cannot retrieve locked instance %s" % self.op.instance_name
4363

    
4364
  def Exec(self, feedback_fn):
4365
    """Deactivate the disks
4366

4367
    """
4368
    instance = self.instance
4369
    _SafeShutdownInstanceDisks(self, instance)
4370

    
4371

    
4372
def _SafeShutdownInstanceDisks(lu, instance, disks=None):
4373
  """Shutdown block devices of an instance.
4374

4375
  This function checks that the instance is not running before calling
4376
  _ShutdownInstanceDisks.
4377

4378
  """
4379
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
4380
  _ShutdownInstanceDisks(lu, instance, disks=disks)
4381

    
4382

    
4383
def _ExpandCheckDisks(instance, disks):
4384
  """Return the instance disks selected by the disks list
4385

4386
  @type disks: list of L{objects.Disk} or None
4387
  @param disks: selected disks
4388
  @rtype: list of L{objects.Disk}
4389
  @return: selected instance disks to act on
4390

4391
  """
4392
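  # Example (illustrative): passing disks=None selects all of instance.disks;
  # passing a subset of instance.disks returns just that subset, and anything
  # not belonging to the instance raises ProgrammerError.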
  if disks is None:
4393
    return instance.disks
4394
  else:
4395
    if not set(disks).issubset(instance.disks):
4396
      raise errors.ProgrammerError("Can only act on disks belonging to the"
4397
                                   " target instance")
4398
    return disks
4399

    
4400

    
4401
def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
4402
  """Shutdown block devices of an instance.
4403

4404
  This does the shutdown on all nodes of the instance.
4405

4406
  If ignore_primary is true, errors on the primary node are ignored
4407
  when computing the return value.
4408

4409
  """
4410
  all_result = True
4411
  disks = _ExpandCheckDisks(instance, disks)
4412

    
4413
  for disk in disks:
4414
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
4415
      lu.cfg.SetDiskID(top_disk, node)
4416
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
4417
      msg = result.fail_msg
4418
      if msg:
4419
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
4420
                      disk.iv_name, node, msg)
4421
        if not ignore_primary or node != instance.primary_node:
4422
          all_result = False
4423
  return all_result
4424

    
4425

    
4426
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
4427
  """Checks if a node has enough free memory.
4428

4429
  This function checks if a given node has the needed amount of free
4430
  memory. In case the node has less memory or we cannot get the
4431
  information from the node, this function raises an OpPrereqError
4432
  exception.
4433

4434
  @type lu: C{LogicalUnit}
4435
  @param lu: a logical unit from which we get configuration data
4436
  @type node: C{str}
4437
  @param node: the node to check
4438
  @type reason: C{str}
4439
  @param reason: string to use in the error message
4440
  @type requested: C{int}
4441
  @param requested: the amount of memory in MiB to check for
4442
  @type hypervisor_name: C{str}
4443
  @param hypervisor_name: the hypervisor to ask for memory stats
4444
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
4445
      we cannot check the node
4446

4447
  """
4448
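  # Illustrative call (hypothetical values): asking for 512 MiB on node pnode,
  #   _CheckNodeFreeMemory(self, pnode, "starting instance X", 512, "xen-pvm")
  # raises OpPrereqError unless that node reports at least 512 MiB free.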
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
4449
  nodeinfo[node].Raise("Can't get data from node %s" % node,
4450
                       prereq=True, ecode=errors.ECODE_ENVIRON)
4451
  free_mem = nodeinfo[node].payload.get('memory_free', None)
4452
  if not isinstance(free_mem, int):
4453
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
4454
                               " was '%s'" % (node, free_mem),
4455
                               errors.ECODE_ENVIRON)
4456
  if requested > free_mem:
4457
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
4458
                               " needed %s MiB, available %s MiB" %
4459
                               (node, reason, requested, free_mem),
4460
                               errors.ECODE_NORES)
4461

    
4462

    
4463
def _CheckNodesFreeDisk(lu, nodenames, requested):
4464
  """Checks if nodes have enough free disk space in the default VG.
4465

4466
  This function checks if all given nodes have the needed amount of
4467
  free disk. In case any node has less disk or we cannot get the
4468
  information from the node, this function raises an OpPrereqError
4469
  exception.
4470

4471
  @type lu: C{LogicalUnit}
4472
  @param lu: a logical unit from which we get configuration data
4473
  @type nodenames: C{list}
4474
  @param nodenames: the list of node names to check
4475
  @type requested: C{int}
4476
  @param requested: the amount of disk in MiB to check for
4477
  @raise errors.OpPrereqError: if the node doesn't have enough disk, or
4478
      we cannot check the node
4479

4480
  """
4481
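  # Unlike _CheckNodeFreeMemory above, this checks several nodes in one RPC,
  # always queries with the cluster's default hypervisor, and only considers
  # free space in the default volume group.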
  nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
4482
                                   lu.cfg.GetHypervisorType())
4483
  for node in nodenames:
4484
    info = nodeinfo[node]
4485
    info.Raise("Cannot get current information from node %s" % node,
4486
               prereq=True, ecode=errors.ECODE_ENVIRON)
4487
    vg_free = info.payload.get("vg_free", None)
4488
    if not isinstance(vg_free, int):
4489
      raise errors.OpPrereqError("Can't compute free disk space on node %s,"
4490
                                 " result was '%s'" % (node, vg_free),
4491
                                 errors.ECODE_ENVIRON)
4492
    if requested > vg_free:
4493
      raise errors.OpPrereqError("Not enough disk space on target node %s:"
4494
                                 " required %d MiB, available %d MiB" %
4495
                                 (node, requested, vg_free),
4496
                                 errors.ECODE_NORES)
4497

    
4498

    
4499
class LUStartupInstance(LogicalUnit):
4500
  """Starts an instance.
4501

4502
  """
4503
  HPATH = "instance-start"
4504
  HTYPE = constants.HTYPE_INSTANCE
4505
  _OP_PARAMS = [
4506
    _PInstanceName,
4507
    _PForce,
4508
    ("hvparams", _EmptyDict, _TDict),
4509
    ("beparams", _EmptyDict, _TDict),
4510
    ]
4511
  REQ_BGL = False
4512

    
4513
  def CheckArguments(self):
4514
    # extra beparams
4515
    if self.op.beparams:
4516
      # fill the beparams dict
4517
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4518

    
4519
  def ExpandNames(self):
4520
    self._ExpandAndLockInstance()
4521

    
4522
  def BuildHooksEnv(self):
4523
    """Build hooks env.
4524

4525
    This runs on master, primary and secondary nodes of the instance.
4526

4527
    """
4528
    env = {
4529
      "FORCE": self.op.force,
4530
      }
4531
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4532
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4533
    return env, nl, nl
4534

    
4535
  def CheckPrereq(self):
4536
    """Check prerequisites.
4537

4538
    This checks that the instance is in the cluster.
4539

4540
    """
4541
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4542
    assert self.instance is not None, \
4543
      "Cannot retrieve locked instance %s" % self.op.instance_name
4544

    
4545
    # extra hvparams
4546
    if self.op.hvparams:
4547
      # check hypervisor parameter syntax (locally)
4548
      cluster = self.cfg.GetClusterInfo()
4549
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4550
      filled_hvp = cluster.FillHV(instance)
4551
      filled_hvp.update(self.op.hvparams)
4552
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
4553
      hv_type.CheckParameterSyntax(filled_hvp)
4554
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
4555

    
4556
    _CheckNodeOnline(self, instance.primary_node)
4557

    
4558
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4559
    # check bridges existence
4560
    _CheckInstanceBridgesExist(self, instance)
4561

    
4562
    remote_info = self.rpc.call_instance_info(instance.primary_node,
4563
                                              instance.name,
4564
                                              instance.hypervisor)
4565
    remote_info.Raise("Error checking node %s" % instance.primary_node,
4566
                      prereq=True, ecode=errors.ECODE_ENVIRON)
4567
    if not remote_info.payload: # not running already
4568
      _CheckNodeFreeMemory(self, instance.primary_node,
4569
                           "starting instance %s" % instance.name,
4570
                           bep[constants.BE_MEMORY], instance.hypervisor)
4571

    
4572
  def Exec(self, feedback_fn):
4573
    """Start the instance.
4574

4575
    """
4576
    instance = self.instance
4577
    force = self.op.force
4578

    
4579
    self.cfg.MarkInstanceUp(instance.name)
4580

    
4581
    node_current = instance.primary_node
4582

    
4583
    _StartInstanceDisks(self, instance, force)
4584

    
4585
    result = self.rpc.call_instance_start(node_current, instance,
4586
                                          self.op.hvparams, self.op.beparams)
4587
    msg = result.fail_msg
4588
    if msg:
4589
      _ShutdownInstanceDisks(self, instance)
4590
      raise errors.OpExecError("Could not start instance: %s" % msg)
4591

    
4592

    
4593
class LURebootInstance(LogicalUnit):
4594
  """Reboot an instance.
4595

4596
  """
4597
  HPATH = "instance-reboot"
4598
  HTYPE = constants.HTYPE_INSTANCE
4599
  _OP_PARAMS = [
4600
    _PInstanceName,
4601
    ("ignore_secondaries", False, _TBool),
4602
    ("reboot_type", _NoDefault, _TElemOf(constants.REBOOT_TYPES)),
4603
    _PShutdownTimeout,
4604
    ]
4605
  REQ_BGL = False
4606

    
4607
  def ExpandNames(self):
4608
    self._ExpandAndLockInstance()
4609

    
4610
  def BuildHooksEnv(self):
4611
    """Build hooks env.
4612

4613
    This runs on master, primary and secondary nodes of the instance.
4614

4615
    """
4616
    env = {
4617
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
4618
      "REBOOT_TYPE": self.op.reboot_type,
4619
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
4620
      }
4621
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4622
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4623
    return env, nl, nl
4624

    
4625
  def CheckPrereq(self):
4626
    """Check prerequisites.
4627

4628
    This checks that the instance is in the cluster.
4629

4630
    """
4631
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4632
    assert self.instance is not None, \
4633
      "Cannot retrieve locked instance %s" % self.op.instance_name
4634

    
4635
    _CheckNodeOnline(self, instance.primary_node)
4636

    
4637
    # check bridges existence
4638
    _CheckInstanceBridgesExist(self, instance)
4639

    
4640
  def Exec(self, feedback_fn):
4641
    """Reboot the instance.
4642

4643
    """
4644
    instance = self.instance
4645
    ignore_secondaries = self.op.ignore_secondaries
4646
    reboot_type = self.op.reboot_type
4647

    
4648
    node_current = instance.primary_node
4649

    
4650
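    # Soft and hard reboots are delegated to the hypervisor on the primary
    # node; the remaining type (full reboot) is emulated below as a shutdown,
    # disk re-activation and start.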
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
4651
                       constants.INSTANCE_REBOOT_HARD]:
4652
      for disk in instance.disks:
4653
        self.cfg.SetDiskID(disk, node_current)
4654
      result = self.rpc.call_instance_reboot(node_current, instance,
4655
                                             reboot_type,
4656
                                             self.op.shutdown_timeout)
4657
      result.Raise("Could not reboot instance")
4658
    else:
4659
      result = self.rpc.call_instance_shutdown(node_current, instance,
4660
                                               self.op.shutdown_timeout)
4661
      result.Raise("Could not shutdown instance for full reboot")
4662
      _ShutdownInstanceDisks(self, instance)
4663
      _StartInstanceDisks(self, instance, ignore_secondaries)
4664
      result = self.rpc.call_instance_start(node_current, instance, None, None)
4665
      msg = result.fail_msg
4666
      if msg:
4667
        _ShutdownInstanceDisks(self, instance)
4668
        raise errors.OpExecError("Could not start instance for"
4669
                                 " full reboot: %s" % msg)
4670

    
4671
    self.cfg.MarkInstanceUp(instance.name)
4672

    
4673

    
4674
class LUShutdownInstance(LogicalUnit):
4675
  """Shutdown an instance.
4676

4677
  """
4678
  HPATH = "instance-stop"
4679
  HTYPE = constants.HTYPE_INSTANCE
4680
  _OP_PARAMS = [
4681
    _PInstanceName,
4682
    ("timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, _TPositiveInt),
4683
    ]
4684
  REQ_BGL = False
4685

    
4686
  def ExpandNames(self):
4687
    self._ExpandAndLockInstance()
4688

    
4689
  def BuildHooksEnv(self):
4690
    """Build hooks env.
4691

4692
    This runs on master, primary and secondary nodes of the instance.
4693

4694
    """
4695
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4696
    env["TIMEOUT"] = self.op.timeout
4697
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4698
    return env, nl, nl
4699

    
4700
  def CheckPrereq(self):
4701
    """Check prerequisites.
4702

4703
    This checks that the instance is in the cluster.
4704

4705
    """
4706
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4707
    assert self.instance is not None, \
4708
      "Cannot retrieve locked instance %s" % self.op.instance_name
4709
    _CheckNodeOnline(self, self.instance.primary_node)
4710

    
4711
  def Exec(self, feedback_fn):
4712
    """Shutdown the instance.
4713

4714
    """
4715
    instance = self.instance
4716
    node_current = instance.primary_node
4717
    timeout = self.op.timeout
4718
    self.cfg.MarkInstanceDown(instance.name)
4719
    result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
4720
    msg = result.fail_msg
4721
    if msg:
4722
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
4723

    
4724
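    # A failed shutdown RPC above only produces a warning; the instance's
    # disks are deactivated regardless.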
    _ShutdownInstanceDisks(self, instance)
4725

    
4726

    
4727
class LUReinstallInstance(LogicalUnit):
4728
  """Reinstall an instance.
4729

4730
  """
4731
  HPATH = "instance-reinstall"
4732
  HTYPE = constants.HTYPE_INSTANCE
4733
  _OP_PARAMS = [
4734
    _PInstanceName,
4735
    ("os_type", None, _TMaybeString),
4736
    ("force_variant", False, _TBool),
4737
    ]
4738
  REQ_BGL = False
4739

    
4740
  def ExpandNames(self):
4741
    self._ExpandAndLockInstance()
4742

    
4743
  def BuildHooksEnv(self):
4744
    """Build hooks env.
4745

4746
    This runs on master, primary and secondary nodes of the instance.
4747

4748
    """
4749
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4750
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4751
    return env, nl, nl
4752

    
4753
  def CheckPrereq(self):
4754
    """Check prerequisites.
4755

4756
    This checks that the instance is in the cluster and is not running.
4757

4758
    """
4759
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4760
    assert instance is not None, \
4761
      "Cannot retrieve locked instance %s" % self.op.instance_name
4762
    _CheckNodeOnline(self, instance.primary_node)
4763

    
4764
    if instance.disk_template == constants.DT_DISKLESS:
4765
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4766
                                 self.op.instance_name,
4767
                                 errors.ECODE_INVAL)
4768
    _CheckInstanceDown(self, instance, "cannot reinstall")
4769

    
4770
    if self.op.os_type is not None:
4771
      # OS verification
4772
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
4773
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
4774

    
4775
    self.instance = instance
4776

    
4777
  def Exec(self, feedback_fn):
4778
    """Reinstall the instance.
4779

4780
    """
4781
    inst = self.instance
4782

    
4783
    if self.op.os_type is not None:
4784
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
4785
      inst.os = self.op.os_type
4786
      self.cfg.Update(inst, feedback_fn)
4787

    
4788
    _StartInstanceDisks(self, inst, None)
4789
    try:
4790
      feedback_fn("Running the instance OS create scripts...")
4791
      # FIXME: pass debug option from opcode to backend
4792
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
4793
                                             self.op.debug_level)
4794
      result.Raise("Could not install OS for instance %s on node %s" %
4795
                   (inst.name, inst.primary_node))
4796
    finally:
4797
      _ShutdownInstanceDisks(self, inst)
4798

    
4799

    
4800
class LURecreateInstanceDisks(LogicalUnit):
4801
  """Recreate an instance's missing disks.
4802

4803
  """
4804
  HPATH = "instance-recreate-disks"
4805
  HTYPE = constants.HTYPE_INSTANCE
4806
  _OP_PARAMS = [
4807
    _PInstanceName,
4808
    ("disks", _EmptyList, _TListOf(_TPositiveInt)),
4809
    ]
4810
  REQ_BGL = False
4811

    
4812
  def ExpandNames(self):
4813
    self._ExpandAndLockInstance()
4814

    
4815
  def BuildHooksEnv(self):
4816
    """Build hooks env.
4817

4818
    This runs on master, primary and secondary nodes of the instance.
4819

4820
    """
4821
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4822
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4823
    return env, nl, nl
4824

    
4825
  def CheckPrereq(self):
4826
    """Check prerequisites.
4827

4828
    This checks that the instance is in the cluster and is not running.
4829

4830
    """
4831
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4832
    assert instance is not None, \
4833
      "Cannot retrieve locked instance %s" % self.op.instance_name
4834
    _CheckNodeOnline(self, instance.primary_node)
4835

    
4836
    if instance.disk_template == constants.DT_DISKLESS:
4837
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4838
                                 self.op.instance_name, errors.ECODE_INVAL)
4839
    _CheckInstanceDown(self, instance, "cannot recreate disks")
4840

    
4841
    if not self.op.disks:
4842
      self.op.disks = range(len(instance.disks))
4843
    else:
4844
      for idx in self.op.disks:
4845
        if idx >= len(instance.disks):
4846
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
4847
                                     errors.ECODE_INVAL)
4848

    
4849
    self.instance = instance
4850

    
4851
  def Exec(self, feedback_fn):
4852
    """Recreate the disks.
4853

4854
    """
4855
    to_skip = []
4856
    for idx, _ in enumerate(self.instance.disks):
4857
      if idx not in self.op.disks: # disk idx has not been passed in
4858
        to_skip.append(idx)
4859
        continue
4860

    
4861
    _CreateDisks(self, self.instance, to_skip=to_skip)
4862

    
4863

    
4864
class LURenameInstance(LogicalUnit):
4865
  """Rename an instance.
4866

4867
  """
4868
  HPATH = "instance-rename"
4869
  HTYPE = constants.HTYPE_INSTANCE
4870
  _OP_PARAMS = [
4871
    _PInstanceName,
4872
    ("new_name", _NoDefault, _TNonEmptyString),
4873
    ("ignore_ip", False, _TBool),
4874
    ("check_name", True, _TBool),
4875
    ]
4876

    
4877
  def BuildHooksEnv(self):
4878
    """Build hooks env.
4879

4880
    This runs on master, primary and secondary nodes of the instance.
4881

4882
    """
4883
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4884
    env["INSTANCE_NEW_NAME"] = self.op.new_name
4885
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4886
    return env, nl, nl
4887

    
4888
  def CheckPrereq(self):
4889
    """Check prerequisites.
4890

4891
    This checks that the instance is in the cluster and is not running.
4892

4893
    """
4894
    self.op.instance_name = _ExpandInstanceName(self.cfg,
4895
                                                self.op.instance_name)
4896
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4897
    assert instance is not None
4898
    _CheckNodeOnline(self, instance.primary_node)
4899
    _CheckInstanceDown(self, instance, "cannot rename")
4900
    self.instance = instance
4901

    
4902
    # new name verification
4903
    if self.op.check_name:
4904
      name_info = netutils.GetHostInfo(self.op.new_name)
4905
      self.op.new_name = name_info.name
4906

    
4907
    new_name = self.op.new_name
4908

    
4909
    instance_list = self.cfg.GetInstanceList()
4910
    if new_name in instance_list:
4911
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4912
                                 new_name, errors.ECODE_EXISTS)
4913

    
4914
    if not self.op.ignore_ip:
4915
      if netutils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
4916
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4917
                                   (name_info.ip, new_name),
4918
                                   errors.ECODE_NOTUNIQUE)
4919

    
4920
  def Exec(self, feedback_fn):
4921
    """Reinstall the instance.
4922

4923
    """
4924
    inst = self.instance
4925
    old_name = inst.name
4926

    
4927
    if inst.disk_template == constants.DT_FILE:
4928
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4929

    
4930
    self.cfg.RenameInstance(inst.name, self.op.new_name)
4931
    # Change the instance lock. This is definitely safe while we hold the BGL
4932
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
4933
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
4934

    
4935
    # re-read the instance from the configuration after rename
4936
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
4937

    
4938
    if inst.disk_template == constants.DT_FILE:
4939
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4940
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
4941
                                                     old_file_storage_dir,
4942
                                                     new_file_storage_dir)
4943
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
4944
                   " (but the instance has been renamed in Ganeti)" %
4945
                   (inst.primary_node, old_file_storage_dir,
4946
                    new_file_storage_dir))
4947

    
4948
    _StartInstanceDisks(self, inst, None)
4949
    try:
4950
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
4951
                                                 old_name, self.op.debug_level)
4952
      msg = result.fail_msg
4953
      if msg:
4954
        msg = ("Could not run OS rename script for instance %s on node %s"
4955
               " (but the instance has been renamed in Ganeti): %s" %
4956
               (inst.name, inst.primary_node, msg))
4957
        self.proc.LogWarning(msg)
4958
    finally:
4959
      _ShutdownInstanceDisks(self, inst)
4960

    
4961

    
4962
class LURemoveInstance(LogicalUnit):
4963
  """Remove an instance.
4964

4965
  """
4966
  HPATH = "instance-remove"
4967
  HTYPE = constants.HTYPE_INSTANCE
4968
  _OP_PARAMS = [
4969
    _PInstanceName,
4970
    ("ignore_failures", False, _TBool),
4971
    _PShutdownTimeout,
4972
    ]
4973
  REQ_BGL = False
4974

    
4975
  def ExpandNames(self):
4976
    self._ExpandAndLockInstance()
4977
    self.needed_locks[locking.LEVEL_NODE] = []
4978
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4979

    
4980
  def DeclareLocks(self, level):
4981
    if level == locking.LEVEL_NODE:
4982
      self._LockInstancesNodes()
4983

    
4984
  def BuildHooksEnv(self):
4985
    """Build hooks env.
4986

4987
    This runs on master, primary and secondary nodes of the instance.
4988

4989
    """
4990
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4991
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
4992
    nl = [self.cfg.GetMasterNode()]
4993
    nl_post = list(self.instance.all_nodes) + nl
4994
    return env, nl, nl_post
4995

    
4996
  def CheckPrereq(self):
4997
    """Check prerequisites.
4998

4999
    This checks that the instance is in the cluster.
5000

5001
    """
5002
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5003
    assert self.instance is not None, \
5004
      "Cannot retrieve locked instance %s" % self.op.instance_name
5005

    
5006
  def Exec(self, feedback_fn):
5007
    """Remove the instance.
5008

5009
    """
5010
    instance = self.instance
5011
    logging.info("Shutting down instance %s on node %s",
5012
                 instance.name, instance.primary_node)
5013

    
5014
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
5015
                                             self.op.shutdown_timeout)
5016
    msg = result.fail_msg
5017
    if msg:
5018
      if self.op.ignore_failures:
5019
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
5020
      else:
5021
        raise errors.OpExecError("Could not shutdown instance %s on"
5022
                                 " node %s: %s" %
5023
                                 (instance.name, instance.primary_node, msg))
5024

    
5025
    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
5026

    
5027

    
5028
def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
5029
  """Utility function to remove an instance.
5030

5031
  """
5032
  logging.info("Removing block devices for instance %s", instance.name)
5033

    
5034
  if not _RemoveDisks(lu, instance):
5035
    if not ignore_failures:
5036
      raise errors.OpExecError("Can't remove instance's disks")
5037
    feedback_fn("Warning: can't remove instance's disks")
5038

    
5039
  logging.info("Removing instance %s out of cluster config", instance.name)
5040

    
5041
  lu.cfg.RemoveInstance(instance.name)
5042

    
5043
  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
5044
    "Instance lock removal conflict"
5045

    
5046
  # Remove lock for the instance
5047
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
5048

    
5049

    
5050
class LUQueryInstances(NoHooksLU):
5051
  """Logical unit for querying instances.
5052

5053
  """
5054
  # pylint: disable-msg=W0142
5055
  _OP_PARAMS = [
5056
    ("output_fields", _NoDefault, _TListOf(_TNonEmptyString)),
5057
    ("names", _EmptyList, _TListOf(_TNonEmptyString)),
5058
    ("use_locking", False, _TBool),
5059
    ]
5060
  REQ_BGL = False
5061
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
5062
                    "serial_no", "ctime", "mtime", "uuid"]
5063
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
5064
                                    "admin_state",
5065
                                    "disk_template", "ip", "mac", "bridge",
5066
                                    "nic_mode", "nic_link",
5067
                                    "sda_size", "sdb_size", "vcpus", "tags",
5068
                                    "network_port", "beparams",
5069
                                    r"(disk)\.(size)/([0-9]+)",
5070
                                    r"(disk)\.(sizes)", "disk_usage",
5071
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
5072
                                    r"(nic)\.(bridge)/([0-9]+)",
5073
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
5074
                                    r"(disk|nic)\.(count)",
5075
                                    "hvparams",
5076
                                    ] + _SIMPLE_FIELDS +
5077
                                  ["hv/%s" % name
5078
                                   for name in constants.HVS_PARAMETERS
5079
                                   if name not in constants.HVC_GLOBALS] +
5080
                                  ["be/%s" % name
5081
                                   for name in constants.BES_PARAMETERS])
5082
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state",
5083
                                   "oper_ram",
5084
                                   "oper_vcpus",
5085
                                   "status")
5086

    
5087

    
5088
  def CheckArguments(self):
5089
    _CheckOutputFields(static=self._FIELDS_STATIC,
5090
                       dynamic=self._FIELDS_DYNAMIC,
5091
                       selected=self.op.output_fields)
5092

    
5093
  def ExpandNames(self):
5094
    self.needed_locks = {}
5095
    self.share_locks[locking.LEVEL_INSTANCE] = 1
5096
    self.share_locks[locking.LEVEL_NODE] = 1
5097

    
5098
    if self.op.names:
5099
      self.wanted = _GetWantedInstances(self, self.op.names)
5100
    else:
5101
      self.wanted = locking.ALL_SET
5102

    
5103
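    # Live per-node queries (and therefore locking) are only needed when at
    # least one requested field is not a static, config-only field.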
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
5104
    self.do_locking = self.do_node_query and self.op.use_locking
5105
    if self.do_locking:
5106
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
5107
      self.needed_locks[locking.LEVEL_NODE] = []
5108
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5109

    
5110
  def DeclareLocks(self, level):
5111
    if level == locking.LEVEL_NODE and self.do_locking:
5112
      self._LockInstancesNodes()
5113

    
5114
  def Exec(self, feedback_fn):
5115
    """Computes the list of nodes and their attributes.
5116

5117
    """
5118
    # pylint: disable-msg=R0912
5119
    # way too many branches here
5120
    all_info = self.cfg.GetAllInstancesInfo()
5121
    if self.wanted == locking.ALL_SET:
5122
      # caller didn't specify instance names, so ordering is not important
5123
      if self.do_locking:
5124
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5125
      else:
5126
        instance_names = all_info.keys()
5127
      instance_names = utils.NiceSort(instance_names)
5128
    else:
5129
      # caller did specify names, so we must keep the ordering
5130
      if self.do_locking:
5131
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
5132
      else:
5133
        tgt_set = all_info.keys()
5134
      missing = set(self.wanted).difference(tgt_set)
5135
      if missing:
5136
        raise errors.OpExecError("Some instances were removed before"
5137
                                 " retrieving their data: %s" % missing)
5138
      instance_names = self.wanted
5139

    
5140
    instance_list = [all_info[iname] for iname in instance_names]
5141

    
5142
    # begin data gathering
5143

    
5144
    nodes = frozenset([inst.primary_node for inst in instance_list])
5145
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
5146

    
5147
    bad_nodes = []
5148
    off_nodes = []
5149
    if self.do_node_query:
5150
      live_data = {}
5151
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
5152
      for name in nodes:
5153
        result = node_data[name]
5154
        if result.offline:
5155
          # offline nodes will be in both lists
5156
          off_nodes.append(name)
5157
        if result.fail_msg:
5158
          bad_nodes.append(name)
5159
        else:
5160
          if result.payload:
5161
            live_data.update(result.payload)
5162
          # else no instance is alive
5163
    else:
5164
      live_data = dict([(name, {}) for name in instance_names])
5165

    
5166
    # end data gathering
5167

    
5168
    HVPREFIX = "hv/"
5169
    BEPREFIX = "be/"
5170
    output = []
5171
    cluster = self.cfg.GetClusterInfo()
5172
    for instance in instance_list:
5173
      iout = []
5174
      i_hv = cluster.FillHV(instance, skip_globals=True)
5175
      i_be = cluster.FillBE(instance)
5176
      i_nicp = [cluster.SimpleFillNIC(nic.nicparams) for nic in instance.nics]
5177
      for field in self.op.output_fields:
5178
        st_match = self._FIELDS_STATIC.Matches(field)
5179
        if field in self._SIMPLE_FIELDS:
5180
          val = getattr(instance, field)
5181
        elif field == "pnode":
5182
          val = instance.primary_node
5183
        elif field == "snodes":
5184
          val = list(instance.secondary_nodes)
5185
        elif field == "admin_state":
5186
          val = instance.admin_up
5187
        elif field == "oper_state":
5188
          if instance.primary_node in bad_nodes:
5189
            val = None
5190
          else:
5191
            val = bool(live_data.get(instance.name))
5192
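        # "status" combines admin and operational state:
        #   running, admin up   -> "running"
        #   running, admin down -> "ERROR_up"
        #   stopped, admin up   -> "ERROR_down"
        #   stopped, admin down -> "ADMIN_down"
        # with ERROR_nodeoffline/ERROR_nodedown when the primary node cannot
        # be queried.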
        elif field == "status":
5193
          if instance.primary_node in off_nodes:
5194
            val = "ERROR_nodeoffline"
5195
          elif instance.primary_node in bad_nodes:
5196
            val = "ERROR_nodedown"
5197
          else:
5198
            running = bool(live_data.get(instance.name))
5199
            if running:
5200
              if instance.admin_up:
5201
                val = "running"
5202
              else:
5203
                val = "ERROR_up"
5204
            else:
5205
              if instance.admin_up:
5206
                val = "ERROR_down"
5207
              else:
5208
                val = "ADMIN_down"
5209
        elif field == "oper_ram":
5210
          if instance.primary_node in bad_nodes:
5211
            val = None
5212
          elif instance.name in live_data:
5213
            val = live_data[instance.name].get("memory", "?")
5214
          else:
5215
            val = "-"
5216
        elif field == "oper_vcpus":
5217
          if instance.primary_node in bad_nodes:
5218
            val = None
5219
          elif instance.name in live_data:
5220
            val = live_data[instance.name].get("vcpus", "?")
5221
          else:
5222
            val = "-"
5223
        elif field == "vcpus":
5224
          val = i_be[constants.BE_VCPUS]
5225
        elif field == "disk_template":
5226
          val = instance.disk_template
5227
        elif field == "ip":
5228
          if instance.nics:
5229
            val = instance.nics[0].ip
5230
          else:
5231
            val = None
5232
        elif field == "nic_mode":
5233
          if instance.nics:
5234
            val = i_nicp[0][constants.NIC_MODE]
5235
          else:
5236
            val = None
5237
        elif field == "nic_link":
5238
          if instance.nics:
5239
            val = i_nicp[0][constants.NIC_LINK]
5240
          else:
5241
            val = None
5242
        elif field == "bridge":
5243
          if (instance.nics and
5244
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
5245
            val = i_nicp[0][constants.NIC_LINK]
5246
          else:
5247
            val = None
5248
        elif field == "mac":
5249
          if instance.nics:
5250
            val = instance.nics[0].mac
5251
          else:
5252
            val = None
5253
        elif field == "sda_size" or field == "sdb_size":
5254
          idx = ord(field[2]) - ord('a')
5255
          try:
5256
            val = instance.FindDisk(idx).size
5257
          except errors.OpPrereqError:
5258
            val = None
5259
        elif field == "disk_usage": # total disk usage per node
5260
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
5261
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
5262
        elif field == "tags":
5263
          val = list(instance.GetTags())
5264
        elif field == "hvparams":
5265
          val = i_hv
5266
        elif (field.startswith(HVPREFIX) and
5267
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS and
5268
              field[len(HVPREFIX):] not in constants.HVC_GLOBALS):
5269
          val = i_hv.get(field[len(HVPREFIX):], None)
5270
        elif field == "beparams":
5271
          val = i_be
5272
        elif (field.startswith(BEPREFIX) and
5273
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
5274
          val = i_be.get(field[len(BEPREFIX):], None)
5275
        elif st_match and st_match.groups():
5276
          # matches a variable list
5277
          st_groups = st_match.groups()
5278
          if st_groups and st_groups[0] == "disk":
5279
            if st_groups[1] == "count":
5280
              val = len(instance.disks)
5281
            elif st_groups[1] == "sizes":
5282
              val = [disk.size for disk in instance.disks]
5283
            elif st_groups[1] == "size":
5284
              try:
5285
                val = instance.FindDisk(st_groups[2]).size
5286
              except errors.OpPrereqError:
5287
                val = None
5288
            else:
5289
              assert False, "Unhandled disk parameter"
5290
          elif st_groups[0] == "nic":
5291
            if st_groups[1] == "count":
5292
              val = len(instance.nics)
5293
            elif st_groups[1] == "macs":
5294
              val = [nic.mac for nic in instance.nics]
5295
            elif st_groups[1] == "ips":
5296
              val = [nic.ip for nic in instance.nics]
5297
            elif st_groups[1] == "modes":
5298
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
5299
            elif st_groups[1] == "links":
5300
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
5301
            elif st_groups[1] == "bridges":
5302
              val = []
5303
              for nicp in i_nicp:
5304
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
5305
                  val.append(nicp[constants.NIC_LINK])
5306
                else:
5307
                  val.append(None)
5308
            else:
5309
              # index-based item
5310
              nic_idx = int(st_groups[2])
5311
              if nic_idx >= len(instance.nics):
5312
                val = None
5313
              else:
5314
                if st_groups[1] == "mac":
5315
                  val = instance.nics[nic_idx].mac
5316
                elif st_groups[1] == "ip":
5317
                  val = instance.nics[nic_idx].ip
5318
                elif st_groups[1] == "mode":
5319
                  val = i_nicp[nic_idx][constants.NIC_MODE]
5320
                elif st_groups[1] == "link":
5321
                  val = i_nicp[nic_idx][constants.NIC_LINK]
5322
                elif st_groups[1] == "bridge":
5323
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
5324
                  if nic_mode == constants.NIC_MODE_BRIDGED:
5325
                    val = i_nicp[nic_idx][constants.NIC_LINK]
5326
                  else:
5327
                    val = None
5328
                else:
5329
                  assert False, "Unhandled NIC parameter"
5330
          else:
5331
            assert False, ("Declared but unhandled variable parameter '%s'" %
5332
                           field)
5333
        else:
5334
          assert False, "Declared but unhandled parameter '%s'" % field
5335
        iout.append(val)
5336
      output.append(iout)
5337

    
5338
    return output
5339

    
5340

    
5341
class LUFailoverInstance(LogicalUnit):
5342
  """Failover an instance.
5343

5344
  """
5345
  HPATH = "instance-failover"
5346
  HTYPE = constants.HTYPE_INSTANCE
5347
  _OP_PARAMS = [
5348
    _PInstanceName,
5349
    ("ignore_consistency", False, _TBool),
5350
    _PShutdownTimeout,
5351
    ]
5352
  REQ_BGL = False
5353

    
5354
  def ExpandNames(self):
5355
    self._ExpandAndLockInstance()
5356
    self.needed_locks[locking.LEVEL_NODE] = []
5357
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5358

    
5359
  def DeclareLocks(self, level):
5360
    if level == locking.LEVEL_NODE:
5361
      self._LockInstancesNodes()
5362

    
5363
  def BuildHooksEnv(self):
5364
    """Build hooks env.
5365

5366
    This runs on master, primary and secondary nodes of the instance.
5367

5368
    """
5369
    instance = self.instance
5370
    source_node = instance.primary_node
5371
    target_node = instance.secondary_nodes[0]
5372
    env = {
5373
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
5374
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5375
      "OLD_PRIMARY": source_node,
5376
      "OLD_SECONDARY": target_node,
5377
      "NEW_PRIMARY": target_node,
5378
      "NEW_SECONDARY": source_node,
5379
      }
5380
    env.update(_BuildInstanceHookEnvByObject(self, instance))
5381
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5382
    nl_post = list(nl)
5383
    nl_post.append(source_node)
5384
    return env, nl, nl_post
5385

    
5386
  def CheckPrereq(self):
5387
    """Check prerequisites.
5388

5389
    This checks that the instance is in the cluster.
5390

5391
    """
5392
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5393
    assert self.instance is not None, \
5394
      "Cannot retrieve locked instance %s" % self.op.instance_name
5395

    
5396
    bep = self.cfg.GetClusterInfo().FillBE(instance)
5397
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5398
      raise errors.OpPrereqError("Instance's disk layout is not"
5399
                                 " network mirrored, cannot failover.",
5400
                                 errors.ECODE_STATE)
5401

    
5402
    secondary_nodes = instance.secondary_nodes
5403
    if not secondary_nodes:
5404
      raise errors.ProgrammerError("no secondary node but using "
5405
                                   "a mirrored disk template")
5406

    
5407
    target_node = secondary_nodes[0]
5408
    _CheckNodeOnline(self, target_node)
5409
    _CheckNodeNotDrained(self, target_node)
5410
    if instance.admin_up:
5411
      # check memory requirements on the secondary node
5412
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5413
                           instance.name, bep[constants.BE_MEMORY],
5414
                           instance.hypervisor)
5415
    else:
5416
      self.LogInfo("Not checking memory on the secondary node as"
5417
                   " instance will not be started")
5418

    
5419
    # check bridge existence
5420
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5421

    
5422
  def Exec(self, feedback_fn):
5423
    """Failover an instance.
5424

5425
    The failover is done by shutting it down on its present node and
5426
    starting it on the secondary.
5427

5428
    """
5429
    instance = self.instance
5430

    
5431
    source_node = instance.primary_node
5432
    target_node = instance.secondary_nodes[0]
5433

    
5434
    if instance.admin_up:
5435
      feedback_fn("* checking disk consistency between source and target")
5436
      for dev in instance.disks:
5437
        # for drbd, these are drbd over lvm
5438
        if not _CheckDiskConsistency(self, dev, target_node, False):
5439
          if not self.op.ignore_consistency:
5440
            raise errors.OpExecError("Disk %s is degraded on target node,"
5441
                                     " aborting failover." % dev.iv_name)
5442
    else:
5443
      feedback_fn("* not checking disk consistency as instance is not running")
5444

    
5445
    feedback_fn("* shutting down instance on source node")
5446
    logging.info("Shutting down instance %s on node %s",
5447
                 instance.name, source_node)
5448

    
5449
    result = self.rpc.call_instance_shutdown(source_node, instance,
5450
                                             self.op.shutdown_timeout)
5451
    msg = result.fail_msg
5452
    if msg:
5453
      if self.op.ignore_consistency:
5454
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
5455
                             " Proceeding anyway. Please make sure node"
5456
                             " %s is down. Error details: %s",
5457
                             instance.name, source_node, source_node, msg)
5458
      else:
5459
        raise errors.OpExecError("Could not shutdown instance %s on"
5460
                                 " node %s: %s" %
5461
                                 (instance.name, source_node, msg))
5462

    
5463
    feedback_fn("* deactivating the instance's disks on source node")
5464
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
5465
      raise errors.OpExecError("Can't shut down the instance's disks.")
5466

    
5467
    instance.primary_node = target_node
5468
    # distribute new instance config to the other nodes
5469
    self.cfg.Update(instance, feedback_fn)
5470

    
5471
    # Only start the instance if it's marked as up
5472
    if instance.admin_up:
5473
      feedback_fn("* activating the instance's disks on target node")
5474
      logging.info("Starting instance %s on node %s",
5475
                   instance.name, target_node)
5476

    
5477
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
5478
                                           ignore_secondaries=True)
5479
      if not disks_ok:
5480
        _ShutdownInstanceDisks(self, instance)
5481
        raise errors.OpExecError("Can't activate the instance's disks")
5482

    
5483
      feedback_fn("* starting the instance on the target node")
5484
      result = self.rpc.call_instance_start(target_node, instance, None, None)
5485
      msg = result.fail_msg
5486
      if msg:
5487
        _ShutdownInstanceDisks(self, instance)
5488
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5489
                                 (instance.name, target_node, msg))
5490

    
5491

    
5492
class LUMigrateInstance(LogicalUnit):
5493
  """Migrate an instance.
5494

5495
  This is migration without shutting down, compared to the failover,
5496
  which is done with shutdown.
5497

5498
  """
5499
  HPATH = "instance-migrate"
5500
  HTYPE = constants.HTYPE_INSTANCE
5501
  _OP_PARAMS = [
5502
    _PInstanceName,
5503
    _PMigrationMode,
5504
    ("cleanup", False, _TBool),
5505
    ]
5506

    
5507
  REQ_BGL = False
5508

    
5509
  def ExpandNames(self):
5510
    self._ExpandAndLockInstance()
5511

    
5512
    self.needed_locks[locking.LEVEL_NODE] = []
5513
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5514

    
5515
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
5516
                                       self.op.cleanup)
5517
    self.tasklets = [self._migrater]
5518

    
5519
  def DeclareLocks(self, level):
5520
    if level == locking.LEVEL_NODE:
5521
      self._LockInstancesNodes()
5522

    
5523
  def BuildHooksEnv(self):
5524
    """Build hooks env.
5525

5526
    This runs on master, primary and secondary nodes of the instance.
5527

5528
    """
5529
    instance = self._migrater.instance
5530
    source_node = instance.primary_node
5531
    target_node = instance.secondary_nodes[0]
5532
    env = _BuildInstanceHookEnvByObject(self, instance)
5533
    env["MIGRATE_LIVE"] = self._migrater.live
5534
    env["MIGRATE_CLEANUP"] = self.op.cleanup
5535
    env.update({
5536
        "OLD_PRIMARY": source_node,
5537
        "OLD_SECONDARY": target_node,
5538
        "NEW_PRIMARY": target_node,
5539
        "NEW_SECONDARY": source_node,
5540
        })
5541
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5542
    nl_post = list(nl)
5543
    nl_post.append(source_node)
5544
    return env, nl, nl_post
5545

    
5546

    
5547
class LUMoveInstance(LogicalUnit):
5548
  """Move an instance by data-copying.
5549

5550
  """
5551
  HPATH = "instance-move"
5552
  HTYPE = constants.HTYPE_INSTANCE
5553
  _OP_PARAMS = [
5554
    _PInstanceName,
5555
    ("target_node", _NoDefault, _TNonEmptyString),
5556
    _PShutdownTimeout,
5557
    ]
5558
  REQ_BGL = False
5559

    
5560
  def ExpandNames(self):
5561
    self._ExpandAndLockInstance()
5562
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5563
    self.op.target_node = target_node
5564
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
5565
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5566

    
5567
  def DeclareLocks(self, level):
5568
    if level == locking.LEVEL_NODE:
5569
      self._LockInstancesNodes(primary_only=True)
5570

    
5571
  def BuildHooksEnv(self):
5572
    """Build hooks env.
5573

5574
    This runs on master, primary and secondary nodes of the instance.
5575

5576
    """
5577
    env = {
5578
      "TARGET_NODE": self.op.target_node,
5579
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5580
      }
5581
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5582
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
5583
                                       self.op.target_node]
5584
    return env, nl, nl
5585

    
5586
  def CheckPrereq(self):
5587
    """Check prerequisites.
5588

5589
    This checks that the instance is in the cluster.
5590

5591
    """
5592
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5593
    assert self.instance is not None, \
5594
      "Cannot retrieve locked instance %s" % self.op.instance_name
5595

    
5596
    node = self.cfg.GetNodeInfo(self.op.target_node)
5597
    assert node is not None, \
5598
      "Cannot retrieve locked node %s" % self.op.target_node
5599

    
5600
    self.target_node = target_node = node.name
5601

    
5602
    if target_node == instance.primary_node:
5603
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
5604
                                 (instance.name, target_node),
5605
                                 errors.ECODE_STATE)
5606

    
5607
    bep = self.cfg.GetClusterInfo().FillBE(instance)
5608

    
5609
    for idx, dsk in enumerate(instance.disks):
5610
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
5611
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
5612
                                   " cannot copy" % idx, errors.ECODE_STATE)
5613

    
5614
    _CheckNodeOnline(self, target_node)
5615
    _CheckNodeNotDrained(self, target_node)
5616

    
5617
    if instance.admin_up:
5618
      # check memory requirements on the target node
5619
      _CheckNodeFreeMemory(self, target_node, "moving instance %s" %
5620
                           instance.name, bep[constants.BE_MEMORY],
5621
                           instance.hypervisor)
5622
    else:
5623
      self.LogInfo("Not checking memory on the secondary node as"
5624
                   " instance will not be started")
5625

    
5626
    # check bridge existence
5627
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5628

    
5629
  def Exec(self, feedback_fn):
5630
    """Move an instance.
5631

5632
    The move is done by shutting it down on its present node, copying
5633
    the data over (slow) and starting it on the new node.
5634

5635
    """
5636
    instance = self.instance
5637

    
5638
    source_node = instance.primary_node
5639
    target_node = self.target_node
5640

    
5641
    self.LogInfo("Shutting down instance %s on source node %s",
5642
                 instance.name, source_node)
5643

    
5644
    result = self.rpc.call_instance_shutdown(source_node, instance,
5645
                                             self.op.shutdown_timeout)
5646
    msg = result.fail_msg
5647
    if msg:
5648
      if self.op.ignore_consistency:
5649
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
5650
                             " Proceeding anyway. Please make sure node"
5651
                             " %s is down. Error details: %s",
5652
                             instance.name, source_node, source_node, msg)
5653
      else:
5654
        raise errors.OpExecError("Could not shutdown instance %s on"
5655
                                 " node %s: %s" %
5656
                                 (instance.name, source_node, msg))
5657

    
5658
    # create the target disks
5659
    try:
5660
      _CreateDisks(self, instance, target_node=target_node)
5661
    except errors.OpExecError:
5662
      self.LogWarning("Device creation failed, reverting...")
5663
      try:
5664
        _RemoveDisks(self, instance, target_node=target_node)
5665
      finally:
5666
        self.cfg.ReleaseDRBDMinors(instance.name)
5667
        raise
5668

    
5669
    cluster_name = self.cfg.GetClusterInfo().cluster_name
5670

    
5671
    errs = []
5672
    # activate, get path, copy the data over
5673
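    # For each disk: assemble it read-write on the target node, then have the
    # source node export (stream) the data to the returned device path
    # (presumably a dd-over-ssh copy inside the blockdev_export backend).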
    for idx, disk in enumerate(instance.disks):
5674
      self.LogInfo("Copying data for disk %d", idx)
5675
      result = self.rpc.call_blockdev_assemble(target_node, disk,
5676
                                               instance.name, True)
5677
      if result.fail_msg:
5678
        self.LogWarning("Can't assemble newly created disk %d: %s",
5679
                        idx, result.fail_msg)
5680
        errs.append(result.fail_msg)
5681
        break
5682
      dev_path = result.payload
5683
      result = self.rpc.call_blockdev_export(source_node, disk,
5684
                                             target_node, dev_path,
5685
                                             cluster_name)
5686
      if result.fail_msg:
5687
        self.LogWarning("Can't copy data over for disk %d: %s",
5688
                        idx, result.fail_msg)
5689
        errs.append(result.fail_msg)
5690
        break
5691

    
5692
    if errs:
5693
      self.LogWarning("Some disks failed to copy, aborting")
5694
      try:
5695
        _RemoveDisks(self, instance, target_node=target_node)
5696
      finally:
5697
        self.cfg.ReleaseDRBDMinors(instance.name)
5698
        raise errors.OpExecError("Errors during disk copy: %s" %
5699
                                 (",".join(errs),))
5700

    
5701
    instance.primary_node = target_node
5702
    self.cfg.Update(instance, feedback_fn)
5703

    
5704
    self.LogInfo("Removing the disks on the original node")
5705
    _RemoveDisks(self, instance, target_node=source_node)
5706

    
5707
    # Only start the instance if it's marked as up
5708
    if instance.admin_up:
5709
      self.LogInfo("Starting instance %s on node %s",
5710
                   instance.name, target_node)
5711

    
5712
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
5713
                                           ignore_secondaries=True)
5714
      if not disks_ok:
5715
        _ShutdownInstanceDisks(self, instance)
5716
        raise errors.OpExecError("Can't activate the instance's disks")
5717

    
5718
      result = self.rpc.call_instance_start(target_node, instance, None, None)
5719
      msg = result.fail_msg
5720
      if msg:
5721
        _ShutdownInstanceDisks(self, instance)
5722
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5723
                                 (instance.name, target_node, msg))
5724

    
5725

    
5726
class LUMigrateNode(LogicalUnit):
  """Migrate all instances from a node.

  """
  HPATH = "node-migrate"
  HTYPE = constants.HTYPE_NODE
  _OP_PARAMS = [
    _PNodeName,
    _PMigrationMode,
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    # Create tasklets for migrating all instances on this node
    names = []
    tasklets = []

    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
      logging.debug("Migrating instance %s", inst.name)
      names.append(inst.name)

      tasklets.append(TLMigrateInstance(self, inst.name, False))

    self.tasklets = tasklets

    # Declare instance locks
    self.needed_locks[locking.LEVEL_INSTANCE] = names

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NODE_NAME": self.op.node_name,
      }

    nl = [self.cfg.GetMasterNode()]

    return (env, nl, nl)


class TLMigrateInstance(Tasklet):
  """Tasklet class for instance migration.

  @type live: boolean
  @ivar live: whether the migration will be done live or non-live;
      this variable is initialized only after CheckPrereq has run

  """
  def __init__(self, lu, instance_name, cleanup):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.cleanup = cleanup
    self.live = False # will be overridden later

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
    instance = self.cfg.GetInstanceInfo(instance_name)
    assert instance is not None

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)

    if not self.cleanup:
      _CheckNodeNotDrained(self.lu, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      result.Raise("Can't migrate, please use failover",
                   prereq=True, ecode=errors.ECODE_STATE)

    self.instance = instance

    if self.lu.op.mode is None:
      # read the default value from the hypervisor
      i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False)
      self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]

    self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks on node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise("Can't contact node %s" % node)

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance, self.feedback_fn)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

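  # Summary of the cleanup decision above: if the instance runs on both nodes
  # or on neither, _ExecCleanup refuses to guess and asks for manual repair;
  # if it runs only on the target, the migration actually finished and the
  # config is updated so the old primary gets demoted; if it runs only on the
  # source, the target is demoted instead. Either way the disks are then taken
  # through standalone mode back to a connected single-master setup.
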
  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.lu.LogWarning("Migration failed and I can't reconnect the"
                         " drives: error '%s'\n"
                         "Please look and recover the instance status" %
                         str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self.lu, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migration." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.fail_msg
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.fail_msg
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Pre-migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s", msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    feedback_fn("Migrating instance %s" % self.instance.name)

    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }

    if self.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()


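# Rough sketch of the DRBD transitions driven by TLMigrateInstance above,
# assuming a healthy two-node DRBD8 instance:
#   1. demote the target's disks to secondary, go standalone, reconnect in
#      dual-master mode and wait for resync;
#   2. let the hypervisor migrate the instance while both sides are primary;
#   3. demote the old primary, wait for sync, go standalone and reconnect in
#      single-master mode.
# _ExecCleanup reuses the same helpers to bring the disks back to a
# consistent single-master state after an interrupted migration.
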
def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device for which
      CreateOnSecondary() returns True
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)


def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  result.Raise("Can't create block device %s on"
               " node %s for instance %s" % (device, node, instance.name))
  if device.physical_id is None:
    device.physical_id = result.payload


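# The two helpers above build device trees bottom-up: _CreateBlockDev recurses
# into the children first, so for a DRBD8 disk the data and metadata LVs are
# created (via _CreateSingleBlockDev) before the DRBD device stacked on top of
# them, and force_create is switched on as soon as a device reports
# CreateOnSecondary().
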
def _GenerateUniqueNames(lu, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
    results.append("%s%s" % (new_id, val))
  return results


def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev


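# Illustration (hypothetical values): for a single 10240 MiB instance disk,
# _GenerateDRBD8Branch above returns a three-node Disk tree:
#   LD_DRBD8 (size 10240, iv_name "disk/0",
#             logical_id = (primary, secondary, port, p_minor, s_minor,
#                           shared_secret))
#     +- LD_LV <vg>/<uuid>.disk0_data  (10240 MiB)
#     +- LD_LV <vg>/<uuid>.disk0_meta  (128 MiB, fixed-size DRBD metadata)
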
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    _RequireFileStorage()

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks


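# Example (hypothetical two-disk DT_DRBD8 request, base_index=0):
# _GenerateDiskTemplate above allocates four DRBD minors in the order
# [primary, secondary, primary, secondary], derives LV names of the form
# <uuid0>.disk0_data/_meta and <uuid1>.disk1_data/_meta, and returns one
# DRBD8 Disk per requested disk with iv_names "disk/0" and "disk/1".
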
def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


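# Example: for an instance named "inst1.example.com" the helper above returns
# "originstname+inst1.example.com", which is attached to the instance's
# volumes as an LVM tag.
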
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node: string
  @param target_node: if passed, overrides the target node for creation
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  if target_node is None:
    pnode = instance.primary_node
    all_nodes = instance.all_nodes
  else:
    pnode = target_node
    all_nodes = [pnode]

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir, pnode))

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for idx, device in enumerate(instance.disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in all_nodes:
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)


def _RemoveDisks(lu, instance, target_node=None):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node: string
  @param target_node: used to override the node on which to remove the disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  for device in instance.disks:
    if target_node:
      edata = [(target_node, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node, disk in edata:
      lu.cfg.SetDiskID(disk, node)
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", device.iv_name, node, msg)
        all_result = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if target_node:
      tgt = target_node
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, instance.primary_node, result.fail_msg)
      all_result = False

  return all_result


def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]


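# Worked example for _ComputeDiskSize above: two disks of 1024 and 2048 MiB
# need 1024 + 2048 = 3072 MiB of free VG space as DT_PLAIN, and
# (1024 + 128) + (2048 + 128) = 3328 MiB as DT_DRBD8 because of the per-disk
# metadata volume; DT_DISKLESS and DT_FILE need no VG space at all (None).
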
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                  hvname,
                                                  hvparams)
  for node in nodenames:
    info = hvinfo[node]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" % node)


def _CheckOSParams(lu, required, nodenames, osname, osparams):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  result = lu.rpc.call_os_validate(required, nodenames, osname,
                                   [constants.OS_VALIDATE_PARAMETERS],
                                   osparams)
  for node, nres in result.items():
    # we don't check for offline cases since this should be run only
    # against the master node and/or an instance's nodes
    nres.Raise("OS Parameters validation failed on node %s" % node)
    if not nres.payload:
      lu.LogInfo("OS %s not found on node %s, validation skipped",
                 osname, node)


class LUCreateInstance(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_PARAMS = [
    _PInstanceName,
    ("mode", _NoDefault, _TElemOf(constants.INSTANCE_CREATE_MODES)),
    ("start", True, _TBool),
    ("wait_for_sync", True, _TBool),
    ("ip_check", True, _TBool),
    ("name_check", True, _TBool),
    ("disks", _NoDefault, _TListOf(_TDict)),
    ("nics", _NoDefault, _TListOf(_TDict)),
    ("hvparams", _EmptyDict, _TDict),
    ("beparams", _EmptyDict, _TDict),
    ("osparams", _EmptyDict, _TDict),
    ("no_install", None, _TMaybeBool),
    ("os_type", None, _TMaybeString),
    ("force_variant", False, _TBool),
    ("source_handshake", None, _TOr(_TList, _TNone)),
    ("source_x509_ca", None, _TOr(_TList, _TNone)),
    ("source_instance_name", None, _TMaybeString),
    ("src_node", None, _TMaybeString),
    ("src_path", None, _TMaybeString),
    ("pnode", None, _TMaybeString),
    ("snode", None, _TMaybeString),
    ("iallocator", None, _TMaybeString),
    ("hypervisor", None, _TMaybeString),
    ("disk_template", _NoDefault, _CheckDiskTemplate),
    ("identify_defaults", False, _TBool),
    ("file_driver", None, _TOr(_TNone, _TElemOf(constants.FILE_DRIVER))),
    ("file_storage_dir", None, _TMaybeString),
    ("dry_run", False, _TBool),
    ]
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.HostInfo.NormalizeName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do ip checks without a name check",
                                 errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if "adopt" in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = netutils.GetHostInfo(self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      raise errors.OpPrereqError("Remote imports require names to be checked",
                                 errors.ECODE_INVAL)
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute",
                                 errors.ECODE_INVAL)

    ### Node/iallocator related checks
    _CheckIAllocatorOrNode(self, "iallocator", "pnode")

    self._cds = _GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      norm_name = netutils.HostInfo.NormalizeName(src_instance_name)
      self.source_instance_name = netutils.GetHostInfo(norm_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

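  # CheckArguments above only performs local, syntactic validation: disk
  # "adopt" entries must be used for all disks or none and are incompatible
  # with an iallocator or an import, and a remote import must present a
  # handshake signed with the shared cluster domain secret plus a verifiable
  # signed X509 CA before any locks are taken.
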
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(constants.EXPORT_DIR, src_path)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    nics = [n.ToDict() for n in self.nics]
    ial = IAllocator(self.cfg, self.rpc,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=self.disks,
                     nics=nics,
                     hypervisor=self.op.hypervisor,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)
    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.result),
                                  ial.required_nodes), errors.ECODE_FAULT)
    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))
    if ial.required_nodes == 2:
      self.op.snode = ial.result[1]

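  # The iallocator contract relied on above: ial.result must be a list of
  # exactly ial.required_nodes node names; the first entry becomes the primary
  # node and, when two nodes are required (e.g. for DRBD8), the second becomes
  # the secondary.
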
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=_NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d["size"], d["mode"]) for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
    ))

    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   src_path, errors.ECODE_INVAL)

    _CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if (int(ei_version) != constants.EXPORT_VERSION):
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't override some instance parameters, try
    to take them from the export information, if it declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      if einfo.has_option(constants.INISECT_INS, "disk_count"):
        disks = []
        # TODO: import the disk iv_name too
        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({"size": disk_sz})
        self.op.disks = disks
      else:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if (not self.op.nics and
        einfo.has_option(constants.INISECT_INS, "nic_count")):
      nics = []
      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
        ndict = {}
        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
          ndict[name] = v
        nics.append(ndict)
      self.op.nics = nics

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

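  # _RevertToDefaults above implements the "identify_defaults" option: any
  # hvparam, beparam, nic parameter or osparam whose requested value already
  # equals the cluster-provided default is dropped from the opcode, so the new
  # instance keeps following the cluster defaults instead of pinning them.
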
  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)

    _CheckDiskTemplate(self.op.disk_template)

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    _CheckGlobalHvParams(self.op.hvparams)

    # fill and remember the beparams dict
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
    self.be_full = cluster.SimpleFillBE(self.op.beparams)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = []
    for idx, nic in enumerate(self.op.nics):
      nic_mode_req = nic.get("mode", None)
      nic_mode = nic_mode_req
      if nic_mode is None:
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

      # in routed mode, for the first nic, the default ip is 'auto'
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
        default_ip_mode = constants.VALUE_AUTO
      else:
        default_ip_mode = constants.VALUE_NONE

      # ip validity checks
      ip = nic.get("ip", default_ip_mode)
      if ip is None or ip.lower() == constants.VALUE_NONE:
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        if not self.op.name_check:
          raise errors.OpPrereqError("IP address set to auto but name checks"
                                     " have been skipped. Aborting.",
                                     errors.ECODE_INVAL)
        nic_ip = self.hostname1.ip
      else:
        if not netutils.IsValidIP4(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip,
                                     errors.ECODE_INVAL)
        nic_ip = ip

      # TODO: check the ip address for uniqueness
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                   errors.ECODE_INVAL)

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        mac = utils.NormalizeAndValidateMac(mac)

        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address %s already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)

      # bridge verification
      bridge = nic.get("bridge", None)
      link = nic.get("link", None)
      if bridge and link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time", errors.ECODE_INVAL)
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
                                   errors.ECODE_INVAL)
      elif bridge:
        link = bridge

      nicparams = {}
      if nic_mode_req:
        nicparams[constants.NIC_MODE] = nic_mode_req
      if link:
        nicparams[constants.NIC_LINK] = link

      check_params = cluster.SimpleFillNIC(nicparams)
      objects.NIC.CheckParameterSyntax(check_params)
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))

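    # Example (hypothetical request): a nic dict such as
    #   {"mode": "bridged", "link": "br0", "mac": "auto"}
    # becomes objects.NIC(mac="auto", ip=None,
    #                     nicparams={NIC_MODE: "bridged", NIC_LINK: "br0"});
    # the "auto" MAC is replaced by a generated address further down, just
    # before the allocator is run.
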
    # disk checks/pre-build
7036
    self.disks = []
7037
    for disk in self.op.disks:
7038
      mode = disk.get("mode", constants.DISK_RDWR)
7039
      if mode not in constants.DISK_ACCESS_SET:
7040
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
7041
                                   mode, errors.ECODE_INVAL)
7042
      size = disk.get("size", None)
7043
      if size is None:
7044
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
7045
      try:
7046
        size = int(size)
7047
      except (TypeError, ValueError):
7048
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
7049
                                   errors.ECODE_INVAL)
7050
      new_disk = {"size": size, "mode": mode}
7051
      if "adopt" in disk:
7052
        new_disk["adopt"] = disk["adopt"]
7053
      self.disks.append(new_disk)
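      # For illustration only (values made up): a disk spec list passing the
      # checks above might be
      #   [{"size": 10240, "mode": "rw"},
      #    {"size": 2048, "adopt": "existing-lv-name"}]
      # where the second entry requests adoption of an already existing LV.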
7054

    
7055
    if self.op.mode == constants.INSTANCE_IMPORT:
7056

    
7057
      # Check that the new instance doesn't have less disks than the export
7058
      instance_disks = len(self.disks)
7059
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
7060
      if instance_disks < export_disks:
7061
        raise errors.OpPrereqError("Not enough disks to import."
7062
                                   " (instance: %d, export: %d)" %
7063
                                   (instance_disks, export_disks),
7064
                                   errors.ECODE_INVAL)
7065

    
7066
      disk_images = []
7067
      for idx in range(export_disks):
7068
        option = 'disk%d_dump' % idx
7069
        if export_info.has_option(constants.INISECT_INS, option):
7070
          # FIXME: are the old os-es, disk sizes, etc. useful?
7071
          export_name = export_info.get(constants.INISECT_INS, option)
7072
          image = utils.PathJoin(self.op.src_path, export_name)
7073
          disk_images.append(image)
7074
        else:
7075
          disk_images.append(False)
7076

    
7077
      self.src_images = disk_images
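      # disk_images is therefore a per-index list holding either the path of
      # the corresponding dump file inside src_path, or False when the export
      # has no dump for that disk index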
7078

    
7079
      old_name = export_info.get(constants.INISECT_INS, 'name')
7080
      try:
7081
        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
7082
      except (TypeError, ValueError), err:
7083
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
7084
                                   " an integer: %s" % str(err),
7085
                                   errors.ECODE_STATE)
7086
      if self.op.instance_name == old_name:
7087
        for idx, nic in enumerate(self.nics):
7088
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
7089
            nic_mac_ini = 'nic%d_mac' % idx
7090
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
7091

    
7092
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
7093

    
7094
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
7095
    if self.op.ip_check:
7096
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
7097
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
7098
                                   (self.check_ip, self.op.instance_name),
7099
                                   errors.ECODE_NOTUNIQUE)
7100

    
7101
    #### mac address generation
7102
    # By generating here the mac address both the allocator and the hooks get
7103
    # the real final mac address rather than the 'auto' or 'generate' value.
7104
    # There is a race condition between the generation and the instance object
7105
    # creation, which means that we know the mac is valid now, but we're not
7106
    # sure it will be when we actually add the instance. If things go bad
7107
    # adding the instance will abort because of a duplicate mac, and the
7108
    # creation job will fail.
7109
    for nic in self.nics:
7110
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7111
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
7112

    
7113
    #### allocator run
7114

    
7115
    if self.op.iallocator is not None:
7116
      self._RunAllocator()
7117

    
7118
    #### node related checks
7119

    
7120
    # check primary node
7121
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
7122
    assert self.pnode is not None, \
7123
      "Cannot retrieve locked node %s" % self.op.pnode
7124
    if pnode.offline:
7125
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
7126
                                 pnode.name, errors.ECODE_STATE)
7127
    if pnode.drained:
7128
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
7129
                                 pnode.name, errors.ECODE_STATE)
7130

    
7131
    self.secondaries = []
7132

    
7133
    # mirror node verification
7134
    if self.op.disk_template in constants.DTS_NET_MIRROR:
7135
      if self.op.snode is None:
7136
        raise errors.OpPrereqError("The networked disk templates need"
7137
                                   " a mirror node", errors.ECODE_INVAL)
7138
      if self.op.snode == pnode.name:
7139
        raise errors.OpPrereqError("The secondary node cannot be the"
7140
                                   " primary node.", errors.ECODE_INVAL)
7141
      _CheckNodeOnline(self, self.op.snode)
7142
      _CheckNodeNotDrained(self, self.op.snode)
7143
      self.secondaries.append(self.op.snode)
7144

    
7145
    nodenames = [pnode.name] + self.secondaries
7146

    
7147
    req_size = _ComputeDiskSize(self.op.disk_template,
7148
                                self.disks)
7149

    
7150
    # Check lv size requirements, if not adopting
7151
    if req_size is not None and not self.adopt_disks:
7152
      _CheckNodesFreeDisk(self, nodenames, req_size)
7153

    
7154
    if self.adopt_disks: # instead, we must check the adoption data
7155
      all_lvs = set([i["adopt"] for i in self.disks])
7156
      if len(all_lvs) != len(self.disks):
7157
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
7158
                                   errors.ECODE_INVAL)
7159
      for lv_name in all_lvs:
7160
        try:
7161
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
7162
        except errors.ReservationError:
7163
          raise errors.OpPrereqError("LV named %s used by another instance" %
7164
                                     lv_name, errors.ECODE_NOTUNIQUE)
7165

    
7166
      node_lvs = self.rpc.call_lv_list([pnode.name],
7167
                                       self.cfg.GetVGName())[pnode.name]
7168
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
7169
      node_lvs = node_lvs.payload
7170
      delta = all_lvs.difference(node_lvs.keys())
7171
      if delta:
7172
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
7173
                                   utils.CommaJoin(delta),
7174
                                   errors.ECODE_INVAL)
7175
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
7176
      if online_lvs:
7177
        raise errors.OpPrereqError("Online logical volumes found, cannot"
7178
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
7179
                                   errors.ECODE_STATE)
7180
      # update the size of disk based on what is found
7181
      for dsk in self.disks:
7182
        dsk["size"] = int(float(node_lvs[dsk["adopt"]][0]))
7183

    
7184
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
7185

    
7186
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
7187
    # check OS parameters (remotely)
7188
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
7189

    
7190
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
7191

    
7192
    # memory check on primary node
7193
    if self.op.start:
7194
      _CheckNodeFreeMemory(self, self.pnode.name,
7195
                           "creating instance %s" % self.op.instance_name,
7196
                           self.be_full[constants.BE_MEMORY],
7197
                           self.op.hypervisor)
7198

    
7199
    self.dry_run_result = list(nodenames)
7200

    
7201
  def Exec(self, feedback_fn):
7202
    """Create and add the instance to the cluster.
7203

7204
    """
7205
    instance = self.op.instance_name
7206
    pnode_name = self.pnode.name
7207

    
7208
    ht_kind = self.op.hypervisor
7209
    if ht_kind in constants.HTS_REQ_PORT:
7210
      network_port = self.cfg.AllocatePort()
7211
    else:
7212
      network_port = None
7213

    
7214
    if constants.ENABLE_FILE_STORAGE:
7215
      # this is needed because os.path.join does not accept None arguments
7216
      if self.op.file_storage_dir is None:
7217
        string_file_storage_dir = ""
7218
      else:
7219
        string_file_storage_dir = self.op.file_storage_dir
7220

    
7221
      # build the full file storage dir path
7222
      file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
7223
                                        string_file_storage_dir, instance)
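      # e.g. with a cluster file storage dir of "/srv/ganeti/file-storage",
      # an opcode-level subdir of "somedir" and an instance named
      # "inst1.example.com", this yields
      # "/srv/ganeti/file-storage/somedir/inst1.example.com"
      # (all of these values are illustrative only)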
7224
    else:
7225
      file_storage_dir = ""
7226

    
7227
    disks = _GenerateDiskTemplate(self,
7228
                                  self.op.disk_template,
7229
                                  instance, pnode_name,
7230
                                  self.secondaries,
7231
                                  self.disks,
7232
                                  file_storage_dir,
7233
                                  self.op.file_driver,
7234
                                  0)
7235

    
7236
    iobj = objects.Instance(name=instance, os=self.op.os_type,
7237
                            primary_node=pnode_name,
7238
                            nics=self.nics, disks=disks,
7239
                            disk_template=self.op.disk_template,
7240
                            admin_up=False,
7241
                            network_port=network_port,
7242
                            beparams=self.op.beparams,
7243
                            hvparams=self.op.hvparams,
7244
                            hypervisor=self.op.hypervisor,
7245
                            osparams=self.op.osparams,
7246
                            )
7247

    
7248
    if self.adopt_disks:
7249
      # rename LVs to the newly-generated names; we need to construct
7250
      # 'fake' LV disks with the old data, plus the new unique_id
7251
      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
7252
      rename_to = []
7253
      for t_dsk, a_dsk in zip(tmp_disks, self.disks):
7254
        rename_to.append(t_dsk.logical_id)
7255
        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
7256
        self.cfg.SetDiskID(t_dsk, pnode_name)
7257
      result = self.rpc.call_blockdev_rename(pnode_name,
7258
                                             zip(tmp_disks, rename_to))
7259
      result.Raise("Failed to rename adoped LVs")
7260
    else:
7261
      feedback_fn("* creating instance disks...")
7262
      try:
7263
        _CreateDisks(self, iobj)
7264
      except errors.OpExecError:
7265
        self.LogWarning("Device creation failed, reverting...")
7266
        try:
7267
          _RemoveDisks(self, iobj)
7268
        finally:
7269
          self.cfg.ReleaseDRBDMinors(instance)
7270
          raise
7271

    
7272
    feedback_fn("adding instance %s to cluster config" % instance)
7273

    
7274
    self.cfg.AddInstance(iobj, self.proc.GetECId())
7275

    
7276
    # Declare that we don't want to remove the instance lock anymore, as we've
7277
    # added the instance to the config
7278
    del self.remove_locks[locking.LEVEL_INSTANCE]
7279
    # Unlock all the nodes
7280
    if self.op.mode == constants.INSTANCE_IMPORT:
7281
      nodes_keep = [self.op.src_node]
7282
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
7283
                       if node != self.op.src_node]
7284
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
7285
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
7286
    else:
7287
      self.context.glm.release(locking.LEVEL_NODE)
7288
      del self.acquired_locks[locking.LEVEL_NODE]
7289

    
7290
    if self.op.wait_for_sync:
7291
      disk_abort = not _WaitForSync(self, iobj)
7292
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
7293
      # make sure the disks are not degraded (still sync-ing is ok)
7294
      time.sleep(15)
7295
      feedback_fn("* checking mirrors status")
7296
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
7297
    else:
7298
      disk_abort = False
7299

    
7300
    if disk_abort:
7301
      _RemoveDisks(self, iobj)
7302
      self.cfg.RemoveInstance(iobj.name)
7303
      # Make sure the instance lock gets removed
7304
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
7305
      raise errors.OpExecError("There are some degraded disks for"
7306
                               " this instance")
7307

    
7308
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
7309
      if self.op.mode == constants.INSTANCE_CREATE:
7310
        if not self.op.no_install:
7311
          feedback_fn("* running the instance OS create scripts...")
7312
          # FIXME: pass debug option from opcode to backend
7313
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
7314
                                                 self.op.debug_level)
7315
          result.Raise("Could not add os for instance %s"
7316
                       " on node %s" % (instance, pnode_name))
7317

    
7318
      elif self.op.mode == constants.INSTANCE_IMPORT:
7319
        feedback_fn("* running the instance OS import scripts...")
7320

    
7321
        transfers = []
7322

    
7323
        for idx, image in enumerate(self.src_images):
7324
          if not image:
7325
            continue
7326

    
7327
          # FIXME: pass debug option from opcode to backend
7328
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
7329
                                             constants.IEIO_FILE, (image, ),
7330
                                             constants.IEIO_SCRIPT,
7331
                                             (iobj.disks[idx], idx),
7332
                                             None)
7333
          transfers.append(dt)
7334

    
7335
        import_result = \
7336
          masterd.instance.TransferInstanceData(self, feedback_fn,
7337
                                                self.op.src_node, pnode_name,
7338
                                                self.pnode.secondary_ip,
7339
                                                iobj, transfers)
7340
        if not compat.all(import_result):
7341
          self.LogWarning("Some disks for instance %s on node %s were not"
7342
                          " imported successfully" % (instance, pnode_name))
7343

    
7344
      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7345
        feedback_fn("* preparing remote import...")
7346
        connect_timeout = constants.RIE_CONNECT_TIMEOUT
7347
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
7348

    
7349
        disk_results = masterd.instance.RemoteImport(self, feedback_fn, iobj,
7350
                                                     self.source_x509_ca,
7351
                                                     self._cds, timeouts)
7352
        if not compat.all(disk_results):
7353
          # TODO: Should the instance still be started, even if some disks
7354
          # failed to import (valid for local imports, too)?
7355
          self.LogWarning("Some disks for instance %s on node %s were not"
7356
                          " imported successfully" % (instance, pnode_name))
7357

    
7358
        # Run rename script on newly imported instance
7359
        assert iobj.name == instance
7360
        feedback_fn("Running rename script for %s" % instance)
7361
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
7362
                                                   self.source_instance_name,
7363
                                                   self.op.debug_level)
7364
        if result.fail_msg:
7365
          self.LogWarning("Failed to run rename script for %s on node"
7366
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
7367

    
7368
      else:
7369
        # also checked in the prereq part
7370
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
7371
                                     % self.op.mode)
7372

    
7373
    if self.op.start:
7374
      iobj.admin_up = True
7375
      self.cfg.Update(iobj, feedback_fn)
7376
      logging.info("Starting instance %s on node %s", instance, pnode_name)
7377
      feedback_fn("* starting instance...")
7378
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
7379
      result.Raise("Could not start instance")
7380

    
7381
    return list(iobj.all_nodes)
7382

    
7383

    
7384
class LUConnectConsole(NoHooksLU):
7385
  """Connect to an instance's console.
7386

7387
  This is somewhat special in that it returns the command line that
7388
  you need to run on the master node in order to connect to the
7389
  console.
7390

7391
  """
7392
  _OP_PARAMS = [
7393
    _PInstanceName
7394
    ]
7395
  REQ_BGL = False
7396

    
7397
  def ExpandNames(self):
7398
    self._ExpandAndLockInstance()
7399

    
7400
  def CheckPrereq(self):
7401
    """Check prerequisites.
7402

7403
    This checks that the instance is in the cluster.
7404

7405
    """
7406
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7407
    assert self.instance is not None, \
7408
      "Cannot retrieve locked instance %s" % self.op.instance_name
7409
    _CheckNodeOnline(self, self.instance.primary_node)
7410

    
7411
  def Exec(self, feedback_fn):
7412
    """Connect to the console of an instance
7413

7414
    """
7415
    instance = self.instance
7416
    node = instance.primary_node
7417

    
7418
    node_insts = self.rpc.call_instance_list([node],
7419
                                             [instance.hypervisor])[node]
7420
    node_insts.Raise("Can't get node information from %s" % node)
7421

    
7422
    if instance.name not in node_insts.payload:
7423
      raise errors.OpExecError("Instance %s is not running." % instance.name)
7424

    
7425
    logging.debug("Connecting to console of %s on %s", instance.name, node)
7426

    
7427
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
7428
    cluster = self.cfg.GetClusterInfo()
7429
    # beparams and hvparams are passed separately, to avoid editing the
7430
    # instance and then saving the defaults in the instance itself.
7431
    hvparams = cluster.FillHV(instance)
7432
    beparams = cluster.FillBE(instance)
7433
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
7434

    
7435
    # build ssh cmdline
7436
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
7437

    
7438

    
7439
class LUReplaceDisks(LogicalUnit):
7440
  """Replace the disks of an instance.
7441

7442
  """
7443
  HPATH = "mirrors-replace"
7444
  HTYPE = constants.HTYPE_INSTANCE
7445
  _OP_PARAMS = [
7446
    _PInstanceName,
7447
    ("mode", _NoDefault, _TElemOf(constants.REPLACE_MODES)),
7448
    ("disks", _EmptyList, _TListOf(_TPositiveInt)),
7449
    ("remote_node", None, _TMaybeString),
7450
    ("iallocator", None, _TMaybeString),
7451
    ("early_release", False, _TBool),
7452
    ]
7453
  REQ_BGL = False
7454

    
7455
  def CheckArguments(self):
7456
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
7457
                                  self.op.iallocator)
7458

    
7459
  def ExpandNames(self):
7460
    self._ExpandAndLockInstance()
7461

    
7462
    if self.op.iallocator is not None:
7463
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7464

    
7465
    elif self.op.remote_node is not None:
7466
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7467
      self.op.remote_node = remote_node
7468

    
7469
      # Warning: do not remove the locking of the new secondary here
7470
      # unless DRBD8.AddChildren is changed to work in parallel;
7471
      # currently it doesn't since parallel invocations of
7472
      # FindUnusedMinor will conflict
7473
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
7474
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7475

    
7476
    else:
7477
      self.needed_locks[locking.LEVEL_NODE] = []
7478
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7479

    
7480
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
7481
                                   self.op.iallocator, self.op.remote_node,
7482
                                   self.op.disks, False, self.op.early_release)
7483

    
7484
    self.tasklets = [self.replacer]
7485

    
7486
  def DeclareLocks(self, level):
7487
    # If we're not already locking all nodes in the set we have to declare the
7488
    # instance's primary/secondary nodes.
7489
    if (level == locking.LEVEL_NODE and
7490
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
7491
      self._LockInstancesNodes()
7492

    
7493
  def BuildHooksEnv(self):
7494
    """Build hooks env.
7495

7496
    This runs on the master, the primary and all the secondaries.
7497

7498
    """
7499
    instance = self.replacer.instance
7500
    env = {
7501
      "MODE": self.op.mode,
7502
      "NEW_SECONDARY": self.op.remote_node,
7503
      "OLD_SECONDARY": instance.secondary_nodes[0],
7504
      }
7505
    env.update(_BuildInstanceHookEnvByObject(self, instance))
7506
    nl = [
7507
      self.cfg.GetMasterNode(),
7508
      instance.primary_node,
7509
      ]
7510
    if self.op.remote_node is not None:
7511
      nl.append(self.op.remote_node)
7512
    return env, nl, nl
7513

    
7514

    
7515
class TLReplaceDisks(Tasklet):
7516
  """Replaces disks for an instance.
7517

7518
  Note: Locking is not within the scope of this class.
7519

7520
  """
7521
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
7522
               disks, delay_iallocator, early_release):
7523
    """Initializes this class.
7524

7525
    """
7526
    Tasklet.__init__(self, lu)
7527

    
7528
    # Parameters
7529
    self.instance_name = instance_name
7530
    self.mode = mode
7531
    self.iallocator_name = iallocator_name
7532
    self.remote_node = remote_node
7533
    self.disks = disks
7534
    self.delay_iallocator = delay_iallocator
7535
    self.early_release = early_release
7536

    
7537
    # Runtime data
7538
    self.instance = None
7539
    self.new_node = None
7540
    self.target_node = None
7541
    self.other_node = None
7542
    self.remote_node_info = None
7543
    self.node_secondary_ip = None
7544

    
7545
  @staticmethod
7546
  def CheckArguments(mode, remote_node, iallocator):
7547
    """Helper function for users of this class.
7548

7549
    """
7550
    # check for valid parameter combination
7551
    if mode == constants.REPLACE_DISK_CHG:
7552
      if remote_node is None and iallocator is None:
7553
        raise errors.OpPrereqError("When changing the secondary either an"
7554
                                   " iallocator script must be used or the"
7555
                                   " new node given", errors.ECODE_INVAL)
7556

    
7557
      if remote_node is not None and iallocator is not None:
7558
        raise errors.OpPrereqError("Give either the iallocator or the new"
7559
                                   " secondary, not both", errors.ECODE_INVAL)
7560

    
7561
    elif remote_node is not None or iallocator is not None:
7562
      # Not replacing the secondary
7563
      raise errors.OpPrereqError("The iallocator and new node options can"
7564
                                 " only be used when changing the"
7565
                                 " secondary node", errors.ECODE_INVAL)
7566

    
7567
  @staticmethod
7568
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
7569
    """Compute a new secondary node using an IAllocator.
7570

7571
    """
7572
    ial = IAllocator(lu.cfg, lu.rpc,
7573
                     mode=constants.IALLOCATOR_MODE_RELOC,
7574
                     name=instance_name,
7575
                     relocate_from=relocate_from)
7576

    
7577
    ial.Run(iallocator_name)
7578

    
7579
    if not ial.success:
7580
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
7581
                                 " %s" % (iallocator_name, ial.info),
7582
                                 errors.ECODE_NORES)
7583

    
7584
    if len(ial.result) != ial.required_nodes:
7585
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7586
                                 " of nodes (%s), required %s" %
7587
                                 (iallocator_name,
7588
                                  len(ial.result), ial.required_nodes),
7589
                                 errors.ECODE_FAULT)
7590

    
7591
    remote_node_name = ial.result[0]
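    # in relocate mode the allocator is expected to return exactly
    # ial.required_nodes entries (typically a single new secondary here),
    # hence the use of result[0]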
7592

    
7593
    lu.LogInfo("Selected new secondary for instance '%s': %s",
7594
               instance_name, remote_node_name)
7595

    
7596
    return remote_node_name
7597

    
7598
  def _FindFaultyDisks(self, node_name):
7599
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
7600
                                    node_name, True)
7601

    
7602
  def CheckPrereq(self):
7603
    """Check prerequisites.
7604

7605
    This checks that the instance is in the cluster.
7606

7607
    """
7608
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
7609
    assert instance is not None, \
7610
      "Cannot retrieve locked instance %s" % self.instance_name
7611

    
7612
    if instance.disk_template != constants.DT_DRBD8:
7613
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
7614
                                 " instances", errors.ECODE_INVAL)
7615

    
7616
    if len(instance.secondary_nodes) != 1:
7617
      raise errors.OpPrereqError("The instance has a strange layout,"
7618
                                 " expected one secondary but found %d" %
7619
                                 len(instance.secondary_nodes),
7620
                                 errors.ECODE_FAULT)
7621

    
7622
    if not self.delay_iallocator:
7623
      self._CheckPrereq2()
7624

    
7625
  def _CheckPrereq2(self):
7626
    """Check prerequisites, second part.
7627

7628
    This function should always be part of CheckPrereq. It was separated and is
7629
    now called from Exec because, during node evacuation, the iallocator used
7630
    to be called with an unmodified cluster model that did not take the planned
7631
    changes into account.
7632

7633
    """
7634
    instance = self.instance
7635
    secondary_node = instance.secondary_nodes[0]
7636

    
7637
    if self.iallocator_name is None:
7638
      remote_node = self.remote_node
7639
    else:
7640
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
7641
                                       instance.name, instance.secondary_nodes)
7642

    
7643
    if remote_node is not None:
7644
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
7645
      assert self.remote_node_info is not None, \
7646
        "Cannot retrieve locked node %s" % remote_node
7647
    else:
7648
      self.remote_node_info = None
7649

    
7650
    if remote_node == self.instance.primary_node:
7651
      raise errors.OpPrereqError("The specified node is the primary node of"
7652
                                 " the instance.", errors.ECODE_INVAL)
7653

    
7654
    if remote_node == secondary_node:
7655
      raise errors.OpPrereqError("The specified node is already the"
7656
                                 " secondary node of the instance.",
7657
                                 errors.ECODE_INVAL)
7658

    
7659
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
7660
                                    constants.REPLACE_DISK_CHG):
7661
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
7662
                                 errors.ECODE_INVAL)
7663

    
7664
    if self.mode == constants.REPLACE_DISK_AUTO:
7665
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
7666
      faulty_secondary = self._FindFaultyDisks(secondary_node)
7667

    
7668
      if faulty_primary and faulty_secondary:
7669
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
7670
                                   " one node and can not be repaired"
7671
                                   " automatically" % self.instance_name,
7672
                                   errors.ECODE_STATE)
7673

    
7674
      if faulty_primary:
7675
        self.disks = faulty_primary
7676
        self.target_node = instance.primary_node
7677
        self.other_node = secondary_node
7678
        check_nodes = [self.target_node, self.other_node]
7679
      elif faulty_secondary:
7680
        self.disks = faulty_secondary
7681
        self.target_node = secondary_node
7682
        self.other_node = instance.primary_node
7683
        check_nodes = [self.target_node, self.other_node]
7684
      else:
7685
        self.disks = []
7686
        check_nodes = []
7687

    
7688
    else:
7689
      # Non-automatic modes
7690
      if self.mode == constants.REPLACE_DISK_PRI:
7691
        self.target_node = instance.primary_node
7692
        self.other_node = secondary_node
7693
        check_nodes = [self.target_node, self.other_node]
7694

    
7695
      elif self.mode == constants.REPLACE_DISK_SEC:
7696
        self.target_node = secondary_node
7697
        self.other_node = instance.primary_node
7698
        check_nodes = [self.target_node, self.other_node]
7699

    
7700
      elif self.mode == constants.REPLACE_DISK_CHG:
7701
        self.new_node = remote_node
7702
        self.other_node = instance.primary_node
7703
        self.target_node = secondary_node
7704
        check_nodes = [self.new_node, self.other_node]
7705

    
7706
        _CheckNodeNotDrained(self.lu, remote_node)
7707

    
7708
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
7709
        assert old_node_info is not None
7710
        if old_node_info.offline and not self.early_release:
7711
          # doesn't make sense to delay the release
7712
          self.early_release = True
7713
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
7714
                          " early-release mode", secondary_node)
7715

    
7716
      else:
7717
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
7718
                                     self.mode)
7719

    
7720
      # If not specified all disks should be replaced
7721
      if not self.disks:
7722
        self.disks = range(len(self.instance.disks))
7723

    
7724
    for node in check_nodes:
7725
      _CheckNodeOnline(self.lu, node)
7726

    
7727
    # Check whether disks are valid
7728
    for disk_idx in self.disks:
7729
      instance.FindDisk(disk_idx)
7730

    
7731
    # Get secondary node IP addresses
7732
    node_2nd_ip = {}
7733

    
7734
    for node_name in [self.target_node, self.other_node, self.new_node]:
7735
      if node_name is not None:
7736
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
7737

    
7738
    self.node_secondary_ip = node_2nd_ip
7739

    
7740
  def Exec(self, feedback_fn):
7741
    """Execute disk replacement.
7742

7743
    This dispatches the disk replacement to the appropriate handler.
7744

7745
    """
7746
    if self.delay_iallocator:
7747
      self._CheckPrereq2()
7748

    
7749
    if not self.disks:
7750
      feedback_fn("No disks need replacement")
7751
      return
7752

    
7753
    feedback_fn("Replacing disk(s) %s for %s" %
7754
                (utils.CommaJoin(self.disks), self.instance.name))
7755

    
7756
    activate_disks = (not self.instance.admin_up)
7757

    
7758
    # Activate the instance disks if we're replacing them on a down instance
7759
    if activate_disks:
7760
      _StartInstanceDisks(self.lu, self.instance, True)
7761

    
7762
    try:
7763
      # Should we replace the secondary node?
7764
      if self.new_node is not None:
7765
        fn = self._ExecDrbd8Secondary
7766
      else:
7767
        fn = self._ExecDrbd8DiskOnly
7768

    
7769
      return fn(feedback_fn)
7770

    
7771
    finally:
7772
      # Deactivate the instance disks if we're replacing them on a
7773
      # down instance
7774
      if activate_disks:
7775
        _SafeShutdownInstanceDisks(self.lu, self.instance)
7776

    
7777
  def _CheckVolumeGroup(self, nodes):
7778
    self.lu.LogInfo("Checking volume groups")
7779

    
7780
    vgname = self.cfg.GetVGName()
7781

    
7782
    # Make sure volume group exists on all involved nodes
7783
    results = self.rpc.call_vg_list(nodes)
7784
    if not results:
7785
      raise errors.OpExecError("Can't list volume groups on the nodes")
7786

    
7787
    for node in nodes:
7788
      res = results[node]
7789
      res.Raise("Error checking node %s" % node)
7790
      if vgname not in res.payload:
7791
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
7792
                                 (vgname, node))
7793

    
7794
  def _CheckDisksExistence(self, nodes):
7795
    # Check disk existence
7796
    for idx, dev in enumerate(self.instance.disks):
7797
      if idx not in self.disks:
7798
        continue
7799

    
7800
      for node in nodes:
7801
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
7802
        self.cfg.SetDiskID(dev, node)
7803

    
7804
        result = self.rpc.call_blockdev_find(node, dev)
7805

    
7806
        msg = result.fail_msg
7807
        if msg or not result.payload:
7808
          if not msg:
7809
            msg = "disk not found"
7810
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
7811
                                   (idx, node, msg))
7812

    
7813
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
7814
    for idx, dev in enumerate(self.instance.disks):
7815
      if idx not in self.disks:
7816
        continue
7817

    
7818
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
7819
                      (idx, node_name))
7820

    
7821
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
7822
                                   ldisk=ldisk):
7823
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
7824
                                 " replace disks for instance %s" %
7825
                                 (node_name, self.instance.name))
7826

    
7827
  def _CreateNewStorage(self, node_name):
7828
    vgname = self.cfg.GetVGName()
7829
    iv_names = {}
7830

    
7831
    for idx, dev in enumerate(self.instance.disks):
7832
      if idx not in self.disks:
7833
        continue
7834

    
7835
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
7836

    
7837
      self.cfg.SetDiskID(dev, node_name)
7838

    
7839
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
7840
      names = _GenerateUniqueNames(self.lu, lv_names)
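      # e.g. for disk/0 this requests two unique names ending in
      # ".disk0_data" and ".disk0_meta"; names[0] and names[1] are used for
      # the data and metadata LVs created below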
7841

    
7842
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
7843
                             logical_id=(vgname, names[0]))
7844
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
7845
                             logical_id=(vgname, names[1]))
7846

    
7847
      new_lvs = [lv_data, lv_meta]
7848
      old_lvs = dev.children
7849
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
7850

    
7851
      # we pass force_create=True to force the LVM creation
7852
      for new_lv in new_lvs:
7853
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
7854
                        _GetInstanceInfoText(self.instance), False)
7855

    
7856
    return iv_names
7857

    
7858
  def _CheckDevices(self, node_name, iv_names):
7859
    for name, (dev, _, _) in iv_names.iteritems():
7860
      self.cfg.SetDiskID(dev, node_name)
7861

    
7862
      result = self.rpc.call_blockdev_find(node_name, dev)
7863

    
7864
      msg = result.fail_msg
7865
      if msg or not result.payload:
7866
        if not msg:
7867
          msg = "disk not found"
7868
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
7869
                                 (name, msg))
7870

    
7871
      if result.payload.is_degraded:
7872
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
7873

    
7874
  def _RemoveOldStorage(self, node_name, iv_names):
7875
    for name, (_, old_lvs, _) in iv_names.iteritems():
7876
      self.lu.LogInfo("Remove logical volumes for %s" % name)
7877

    
7878
      for lv in old_lvs:
7879
        self.cfg.SetDiskID(lv, node_name)
7880

    
7881
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
7882
        if msg:
7883
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
7884
                             hint="remove unused LVs manually")
7885

    
7886
  def _ReleaseNodeLock(self, node_name):
7887
    """Releases the lock for a given node."""
7888
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
7889

    
7890
  def _ExecDrbd8DiskOnly(self, feedback_fn):
7891
    """Replace a disk on the primary or secondary for DRBD 8.
7892

7893
    The algorithm for replace is quite complicated:
7894

7895
      1. for each disk to be replaced:
7896

7897
        1. create new LVs on the target node with unique names
7898
        1. detach old LVs from the drbd device
7899
        1. rename old LVs to name_replaced.<time_t>
7900
        1. rename new LVs to old LVs
7901
        1. attach the new LVs (with the old names now) to the drbd device
7902

7903
      1. wait for sync across all devices
7904

7905
      1. for each modified disk:
7906

7907
        1. remove old LVs (which have the name name_replaced.<time_t>)
7908

7909
    Failures are not very well handled.
7910

7911
    """
7912
    steps_total = 6
7913

    
7914
    # Step: check device activation
7915
    self.lu.LogStep(1, steps_total, "Check device existence")
7916
    self._CheckDisksExistence([self.other_node, self.target_node])
7917
    self._CheckVolumeGroup([self.target_node, self.other_node])
7918

    
7919
    # Step: check other node consistency
7920
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7921
    self._CheckDisksConsistency(self.other_node,
7922
                                self.other_node == self.instance.primary_node,
7923
                                False)
7924

    
7925
    # Step: create new storage
7926
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7927
    iv_names = self._CreateNewStorage(self.target_node)
7928

    
7929
    # Step: for each lv, detach+rename*2+attach
7930
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7931
    for dev, old_lvs, new_lvs in iv_names.itervalues():
7932
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
7933

    
7934
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
7935
                                                     old_lvs)
7936
      result.Raise("Can't detach drbd from local storage on node"
7937
                   " %s for device %s" % (self.target_node, dev.iv_name))
7938
      #dev.children = []
7939
      #cfg.Update(instance)
7940

    
7941
      # ok, we created the new LVs, so now we know we have the needed
7942
      # storage; as such, we proceed on the target node to rename
7943
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
7944
      # using the assumption that logical_id == physical_id (which in
7945
      # turn is the unique_id on that node)
7946

    
7947
      # FIXME(iustin): use a better name for the replaced LVs
7948
      temp_suffix = int(time.time())
7949
      ren_fn = lambda d, suff: (d.physical_id[0],
7950
                                d.physical_id[1] + "_replaced-%s" % suff)
7951

    
7952
      # Build the rename list based on what LVs exist on the node
7953
      rename_old_to_new = []
7954
      for to_ren in old_lvs:
7955
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
7956
        if not result.fail_msg and result.payload:
7957
          # device exists
7958
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
7959

    
7960
      self.lu.LogInfo("Renaming the old LVs on the target node")
7961
      result = self.rpc.call_blockdev_rename(self.target_node,
7962
                                             rename_old_to_new)
7963
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
7964

    
7965
      # Now we rename the new LVs to the old LVs
7966
      self.lu.LogInfo("Renaming the new LVs on the target node")
7967
      rename_new_to_old = [(new, old.physical_id)
7968
                           for old, new in zip(old_lvs, new_lvs)]
7969
      result = self.rpc.call_blockdev_rename(self.target_node,
7970
                                             rename_new_to_old)
7971
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
7972

    
7973
      for old, new in zip(old_lvs, new_lvs):
7974
        new.logical_id = old.logical_id
7975
        self.cfg.SetDiskID(new, self.target_node)
7976

    
7977
      for disk in old_lvs:
7978
        disk.logical_id = ren_fn(disk, temp_suffix)
7979
        self.cfg.SetDiskID(disk, self.target_node)
7980

    
7981
      # Now that the new lvs have the old name, we can add them to the device
7982
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
7983
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
7984
                                                  new_lvs)
7985
      msg = result.fail_msg
7986
      if msg:
7987
        for new_lv in new_lvs:
7988
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
7989
                                               new_lv).fail_msg
7990
          if msg2:
7991
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
7992
                               hint=("cleanup manually the unused logical"
7993
                                     "volumes"))
7994
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
7995

    
7996
      dev.children = new_lvs
7997

    
7998
      self.cfg.Update(self.instance, feedback_fn)
7999

    
8000
    cstep = 5
8001
    if self.early_release:
8002
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
8003
      cstep += 1
8004
      self._RemoveOldStorage(self.target_node, iv_names)
8005
      # WARNING: we release both node locks here, do not do other RPCs
8006
      # than WaitForSync to the primary node
8007
      self._ReleaseNodeLock([self.target_node, self.other_node])
8008

    
8009
    # Wait for sync
8010
    # This can fail as the old devices are degraded and _WaitForSync
8011
    # does a combined result over all disks, so we don't check its return value
8012
    self.lu.LogStep(cstep, steps_total, "Sync devices")
8013
    cstep += 1
8014
    _WaitForSync(self.lu, self.instance)
8015

    
8016
    # Check all devices manually
8017
    self._CheckDevices(self.instance.primary_node, iv_names)
8018

    
8019
    # Step: remove old storage
8020
    if not self.early_release:
8021
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
8022
      cstep += 1
8023
      self._RemoveOldStorage(self.target_node, iv_names)
8024

    
8025
  def _ExecDrbd8Secondary(self, feedback_fn):
8026
    """Replace the secondary node for DRBD 8.
8027

8028
    The algorithm for replace is quite complicated:
8029
      - for all disks of the instance:
8030
        - create new LVs on the new node with same names
8031
        - shutdown the drbd device on the old secondary
8032
        - disconnect the drbd network on the primary
8033
        - create the drbd device on the new secondary
8034
        - network attach the drbd on the primary, using an artifice:
8035
          the drbd code for Attach() will connect to the network if it
8036
          finds a device which is connected to the good local disks but
8037
          not network enabled
8038
      - wait for sync across all devices
8039
      - remove all disks from the old secondary
8040

8041
    Failures are not very well handled.
8042

8043
    """
8044
    steps_total = 6
8045

    
8046
    # Step: check device activation
8047
    self.lu.LogStep(1, steps_total, "Check device existence")
8048
    self._CheckDisksExistence([self.instance.primary_node])
8049
    self._CheckVolumeGroup([self.instance.primary_node])
8050

    
8051
    # Step: check other node consistency
8052
    self.lu.LogStep(2, steps_total, "Check peer consistency")
8053
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
8054

    
8055
    # Step: create new storage
8056
    self.lu.LogStep(3, steps_total, "Allocate new storage")
8057
    for idx, dev in enumerate(self.instance.disks):
8058
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
8059
                      (self.new_node, idx))
8060
      # we pass force_create=True to force LVM creation
8061
      for new_lv in dev.children:
8062
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
8063
                        _GetInstanceInfoText(self.instance), False)
8064

    
8065
    # Step 4: drbd minors and drbd setup changes
8066
    # after this, we must manually remove the drbd minors on both the
8067
    # error and the success paths
8068
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8069
    minors = self.cfg.AllocateDRBDMinor([self.new_node
8070
                                         for dev in self.instance.disks],
8071
                                        self.instance.name)
8072
    logging.debug("Allocated minors %r", minors)
8073

    
8074
    iv_names = {}
8075
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
8076
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
8077
                      (self.new_node, idx))
8078
      # create new devices on new_node; note that we create two IDs:
8079
      # one without port, so the drbd will be activated without
8080
      # networking information on the new node at this stage, and one
8081
      # with network, for the latter activation in step 4
8082
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
8083
      if self.instance.primary_node == o_node1:
8084
        p_minor = o_minor1
8085
      else:
8086
        assert self.instance.primary_node == o_node2, "Three-node instance?"
8087
        p_minor = o_minor2
8088

    
8089
      new_alone_id = (self.instance.primary_node, self.new_node, None,
8090
                      p_minor, new_minor, o_secret)
8091
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
8092
                    p_minor, new_minor, o_secret)
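      # both tuples follow the DRBD8 logical_id layout unpacked above:
      # (node_A, node_B, port, minor_A, minor_B, secret); new_alone_id leaves
      # the port unset so the device is brought up without networking first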
8093

    
8094
      iv_names[idx] = (dev, dev.children, new_net_id)
8095
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
8096
                    new_net_id)
8097
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
8098
                              logical_id=new_alone_id,
8099
                              children=dev.children,
8100
                              size=dev.size)
8101
      try:
8102
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
8103
                              _GetInstanceInfoText(self.instance), False)
8104
      except errors.GenericError:
8105
        self.cfg.ReleaseDRBDMinors(self.instance.name)
8106
        raise
8107

    
8108
    # We have new devices, shutdown the drbd on the old secondary
8109
    for idx, dev in enumerate(self.instance.disks):
8110
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
8111
      self.cfg.SetDiskID(dev, self.target_node)
8112
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
8113
      if msg:
8114
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
8115
                           "node: %s" % (idx, msg),
8116
                           hint=("Please cleanup this device manually as"
8117
                                 " soon as possible"))
8118

    
8119
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
8120
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
8121
                                               self.node_secondary_ip,
8122
                                               self.instance.disks)\
8123
                                              [self.instance.primary_node]
8124

    
8125
    msg = result.fail_msg
8126
    if msg:
8127
      # detaches didn't succeed (unlikely)
8128
      self.cfg.ReleaseDRBDMinors(self.instance.name)
8129
      raise errors.OpExecError("Can't detach the disks from the network on"
8130
                               " old node: %s" % (msg,))
8131

    
8132
    # if we managed to detach at least one, we update all the disks of
8133
    # the instance to point to the new secondary
8134
    self.lu.LogInfo("Updating instance configuration")
8135
    for dev, _, new_logical_id in iv_names.itervalues():
8136
      dev.logical_id = new_logical_id
8137
      self.cfg.SetDiskID(dev, self.instance.primary_node)
8138

    
8139
    self.cfg.Update(self.instance, feedback_fn)
8140

    
8141
    # and now perform the drbd attach
8142
    self.lu.LogInfo("Attaching primary drbds to new secondary"
8143
                    " (standalone => connected)")
8144
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
8145
                                            self.new_node],
8146
                                           self.node_secondary_ip,
8147
                                           self.instance.disks,
8148
                                           self.instance.name,
8149
                                           False)
8150
    for to_node, to_result in result.items():
8151
      msg = to_result.fail_msg
8152
      if msg:
8153
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
8154
                           to_node, msg,
8155
                           hint=("please do a gnt-instance info to see the"
8156
                                 " status of disks"))
8157
    cstep = 5
8158
    if self.early_release:
8159
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
8160
      cstep += 1
8161
      self._RemoveOldStorage(self.target_node, iv_names)
8162
      # WARNING: we release all node locks here, do not do other RPCs
8163
      # than WaitForSync to the primary node
8164
      self._ReleaseNodeLock([self.instance.primary_node,
8165
                             self.target_node,
8166
                             self.new_node])
8167

    
8168
    # Wait for sync
8169
    # This can fail as the old devices are degraded and _WaitForSync
8170
    # does a combined result over all disks, so we don't check its return value
8171
    self.lu.LogStep(cstep, steps_total, "Sync devices")
8172
    cstep += 1
8173
    _WaitForSync(self.lu, self.instance)
8174

    
8175
    # Check all devices manually
8176
    self._CheckDevices(self.instance.primary_node, iv_names)
8177

    
8178
    # Step: remove old storage
8179
    if not self.early_release:
8180
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
8181
      self._RemoveOldStorage(self.target_node, iv_names)
8182

    
8183

    
8184
class LURepairNodeStorage(NoHooksLU):
8185
  """Repairs the volume group on a node.
8186

8187
  """
8188
  _OP_PARAMS = [
8189
    _PNodeName,
8190
    ("storage_type", _NoDefault, _CheckStorageType),
8191
    ("name", _NoDefault, _TNonEmptyString),
8192
    ("ignore_consistency", False, _TBool),
8193
    ]
8194
  REQ_BGL = False
8195

    
8196
  def CheckArguments(self):
8197
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
8198

    
8199
    storage_type = self.op.storage_type
8200

    
8201
    if (constants.SO_FIX_CONSISTENCY not in
8202
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
8203
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
8204
                                 " repaired" % storage_type,
8205
                                 errors.ECODE_INVAL)
8206

    
8207
  def ExpandNames(self):
8208
    self.needed_locks = {
8209
      locking.LEVEL_NODE: [self.op.node_name],
8210
      }
8211

    
8212
  def _CheckFaultyDisks(self, instance, node_name):
8213
    """Ensure faulty disks abort the opcode or at least warn."""
8214
    try:
8215
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
8216
                                  node_name, True):
8217
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
8218
                                   " node '%s'" % (instance.name, node_name),
8219
                                   errors.ECODE_STATE)
8220
    except errors.OpPrereqError, err:
8221
      if self.op.ignore_consistency:
8222
        self.proc.LogWarning(str(err.args[0]))
8223
      else:
8224
        raise
8225

    
8226
  def CheckPrereq(self):
8227
    """Check prerequisites.
8228

8229
    """
8230
    # Check whether any instance on this node has faulty disks
8231
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
8232
      if not inst.admin_up:
8233
        continue
8234
      check_nodes = set(inst.all_nodes)
8235
      check_nodes.discard(self.op.node_name)
8236
      for inst_node_name in check_nodes:
8237
        self._CheckFaultyDisks(inst, inst_node_name)
8238

    
8239
  def Exec(self, feedback_fn):
8240
    feedback_fn("Repairing storage unit '%s' on %s ..." %
8241
                (self.op.name, self.op.node_name))
8242

    
8243
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
8244
    result = self.rpc.call_storage_execute(self.op.node_name,
8245
                                           self.op.storage_type, st_args,
8246
                                           self.op.name,
8247
                                           constants.SO_FIX_CONSISTENCY)
8248
    result.Raise("Failed to repair storage unit '%s' on %s" %
8249
                 (self.op.name, self.op.node_name))
8250

    
8251

    
8252
class LUNodeEvacuationStrategy(NoHooksLU):
8253
  """Computes the node evacuation strategy.
8254

8255
  """
8256
  _OP_PARAMS = [
8257
    ("nodes", _NoDefault, _TListOf(_TNonEmptyString)),
8258
    ("remote_node", None, _TMaybeString),
8259
    ("iallocator", None, _TMaybeString),
8260
    ]
8261
  REQ_BGL = False
8262

    
8263
  def CheckArguments(self):
8264
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
8265

    
8266
  def ExpandNames(self):
8267
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
8268
    self.needed_locks = locks = {}
8269
    if self.op.remote_node is None:
8270
      locks[locking.LEVEL_NODE] = locking.ALL_SET
8271
    else:
8272
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8273
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
8274

    
8275
  def Exec(self, feedback_fn):
8276
    if self.op.remote_node is not None:
8277
      instances = []
8278
      for node in self.op.nodes:
8279
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
8280
      result = []
8281
      for i in instances:
8282
        if i.primary_node == self.op.remote_node:
8283
          raise errors.OpPrereqError("Node %s is the primary node of"
8284
                                     " instance %s, cannot use it as"
8285
                                     " secondary" %
8286
                                     (self.op.remote_node, i.name),
8287
                                     errors.ECODE_INVAL)
8288
        result.append([i.name, self.op.remote_node])
8289
    else:
8290
      ial = IAllocator(self.cfg, self.rpc,
8291
                       mode=constants.IALLOCATOR_MODE_MEVAC,
8292
                       evac_nodes=self.op.nodes)
8293
      ial.Run(self.op.iallocator, validate=True)
8294
      if not ial.success:
8295
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
8296
                                 errors.ECODE_NORES)
8297
      result = ial.result
8298
    return result
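    # with remote_node given, the result is the list of [instance_name,
    # remote_node] pairs built above; otherwise it is the raw iallocator
    # result for the MEVAC request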
8299

    
8300

    
8301
class LUGrowDisk(LogicalUnit):
8302
  """Grow a disk of an instance.
8303

8304
  """
8305
  HPATH = "disk-grow"
8306
  HTYPE = constants.HTYPE_INSTANCE
8307
  _OP_PARAMS = [
8308
    _PInstanceName,
8309
    ("disk", _NoDefault, _TInt),
8310
    ("amount", _NoDefault, _TInt),
8311
    ("wait_for_sync", True, _TBool),
8312
    ]
8313
  REQ_BGL = False
8314

    
8315
  def ExpandNames(self):
8316
    self._ExpandAndLockInstance()
8317
    self.needed_locks[locking.LEVEL_NODE] = []
8318
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8319

    
8320
  def DeclareLocks(self, level):
8321
    if level == locking.LEVEL_NODE:
8322
      self._LockInstancesNodes()
8323

    
8324
  def BuildHooksEnv(self):
8325
    """Build hooks env.
8326

8327
    This runs on the master, the primary and all the secondaries.
8328

8329
    """
8330
    env = {
8331
      "DISK": self.op.disk,
8332
      "AMOUNT": self.op.amount,
8333
      }
8334
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
8335
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
8336
    return env, nl, nl
8337

    
8338
  def CheckPrereq(self):
8339
    """Check prerequisites.
8340

8341
    This checks that the instance is in the cluster.
8342

8343
    """
8344
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8345
    assert instance is not None, \
8346
      "Cannot retrieve locked instance %s" % self.op.instance_name
8347
    nodenames = list(instance.all_nodes)
8348
    for node in nodenames:
8349
      _CheckNodeOnline(self, node)
8350

    
8351
    self.instance = instance
8352

    
8353
    if instance.disk_template not in constants.DTS_GROWABLE:
8354
      raise errors.OpPrereqError("Instance's disk layout does not support"
8355
                                 " growing.", errors.ECODE_INVAL)
8356

    
8357
    self.disk = instance.FindDisk(self.op.disk)
8358

    
8359
    if instance.disk_template != constants.DT_FILE:
8360
      # TODO: check the free disk space for file, when that feature will be
8361
      # supported
8362
      _CheckNodesFreeDisk(self, nodenames, self.op.amount)
8363

    
8364
  def Exec(self, feedback_fn):
8365
    """Execute disk grow.
8366

8367
    """
8368
    instance = self.instance
8369
    disk = self.disk
8370

    
8371
    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
8372
    if not disks_ok:
8373
      raise errors.OpExecError("Cannot activate block device to grow")
8374

    
8375
    for node in instance.all_nodes:
8376
      self.cfg.SetDiskID(disk, node)
8377
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
8378
      result.Raise("Grow request failed to node %s" % node)
8379

    
8380
      # TODO: Rewrite code to work properly
8381
      # DRBD goes into sync mode for a short amount of time after executing the
8382
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
8383
      # calling "resize" in sync mode fails. Sleeping for a short amount of
8384
      # time is a work-around.
8385
      time.sleep(5)
8386

    
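    # all nodes accepted the grow request; record the new size in the config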
    disk.RecordGrow(self.op.amount)
8388
    self.cfg.Update(instance, feedback_fn)
8389
    if self.op.wait_for_sync:
8390
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
8391
      if disk_abort:
8392
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
8393
                             " status.\nPlease check the instance.")
8394
      if not instance.admin_up:
8395
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
8396
    elif not instance.admin_up:
8397
      self.proc.LogWarning("Not shutting down the disk even if the instance is"
8398
                           " not supposed to be running because no wait for"
8399
                           " sync mode was requested.")
8400

    
8401

    
8402
class LUQueryInstanceData(NoHooksLU):
8403
  """Query runtime instance data.
8404

8405
  """
8406
  _OP_PARAMS = [
8407
    ("instances", _EmptyList, _TListOf(_TNonEmptyString)),
8408
    ("static", False, _TBool),
8409
    ]
8410
  REQ_BGL = False
8411

    
8412
  def ExpandNames(self):
8413
    self.needed_locks = {}
8414
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
8415

    
8416
    if self.op.instances:
8417
      self.wanted_names = []
8418
      for name in self.op.instances:
8419
        full_name = _ExpandInstanceName(self.cfg, name)
8420
        self.wanted_names.append(full_name)
8421
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
8422
    else:
8423
      self.wanted_names = None
8424
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
8425

    
8426
    self.needed_locks[locking.LEVEL_NODE] = []
8427
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8428

    
8429
  def DeclareLocks(self, level):
8430
    if level == locking.LEVEL_NODE:
8431
      self._LockInstancesNodes()
8432

    
8433
  def CheckPrereq(self):
8434
    """Check prerequisites.
8435

8436
    This only checks the optional instance list against the existing names.
8437

8438
    """
8439
    if self.wanted_names is None:
8440
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
8441

    
8442
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
8443
                             in self.wanted_names]
8444

    
8445
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
8446
    """Returns the status of a block device
8447

8448
    """
8449
    if self.op.static or not node:
8450
      return None
8451

    
8452
    self.cfg.SetDiskID(dev, node)
8453

    
8454
    result = self.rpc.call_blockdev_find(node, dev)
8455
    if result.offline:
8456
      return None
8457

    
8458
    result.Raise("Can't compute disk status for %s" % instance_name)
8459

    
8460
    status = result.payload
8461
    if status is None:
8462
      return None
8463

    
8464
    return (status.dev_path, status.major, status.minor,
8465
            status.sync_percent, status.estimated_time,
8466
            status.is_degraded, status.ldisk_status)
8467

    
8468
  def _ComputeDiskStatus(self, instance, snode, dev):
8469
    """Compute block device status.
8470

8471
    """
8472
    if dev.dev_type in constants.LDS_DRBD:
8473
      # we change the snode then (otherwise we use the one passed in)
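      # (for DRBD the first two entries of logical_id are the two node names,
      #  so use whichever one is not the primary)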
      if dev.logical_id[0] == instance.primary_node:
8475
        snode = dev.logical_id[1]
8476
      else:
8477
        snode = dev.logical_id[0]
8478

    
8479
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
8480
                                              instance.name, dev)
8481
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
8482

    
8483
    if dev.children:
8484
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
8485
                      for child in dev.children]
8486
    else:
8487
      dev_children = []
8488

    
8489
    data = {
8490
      "iv_name": dev.iv_name,
8491
      "dev_type": dev.dev_type,
8492
      "logical_id": dev.logical_id,
8493
      "physical_id": dev.physical_id,
8494
      "pstatus": dev_pstatus,
8495
      "sstatus": dev_sstatus,
8496
      "children": dev_children,
8497
      "mode": dev.mode,
8498
      "size": dev.size,
8499
      }
8500

    
8501
    return data
8502

    
8503
  def Exec(self, feedback_fn):
8504
    """Gather and return data"""
8505
    result = {}
8506

    
8507
    cluster = self.cfg.GetClusterInfo()
8508

    
8509
    for instance in self.wanted_instances:
8510
      if not self.op.static:
8511
        remote_info = self.rpc.call_instance_info(instance.primary_node,
8512
                                                  instance.name,
8513
                                                  instance.hypervisor)
8514
        remote_info.Raise("Error checking node %s" % instance.primary_node)
8515
        remote_info = remote_info.payload
8516
        if remote_info and "state" in remote_info:
8517
          remote_state = "up"
8518
        else:
8519
          remote_state = "down"
8520
      else:
8521
        remote_state = None
8522
      if instance.admin_up:
8523
        config_state = "up"
8524
      else:
8525
        config_state = "down"
8526

    
8527
      disks = [self._ComputeDiskStatus(instance, None, device)
8528
               for device in instance.disks]
8529

    
8530
      idict = {
8531
        "name": instance.name,
8532
        "config_state": config_state,
8533
        "run_state": remote_state,
8534
        "pnode": instance.primary_node,
8535
        "snodes": instance.secondary_nodes,
8536
        "os": instance.os,
8537
        # this happens to be the same format used for hooks
8538
        "nics": _NICListToTuple(self, instance.nics),
8539
        "disk_template": instance.disk_template,
8540
        "disks": disks,
8541
        "hypervisor": instance.hypervisor,
8542
        "network_port": instance.network_port,
8543
        "hv_instance": instance.hvparams,
8544
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
8545
        "be_instance": instance.beparams,
8546
        "be_actual": cluster.FillBE(instance),
8547
        "os_instance": instance.osparams,
8548
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
8549
        "serial_no": instance.serial_no,
8550
        "mtime": instance.mtime,
8551
        "ctime": instance.ctime,
8552
        "uuid": instance.uuid,
8553
        }
8554

    
8555
      result[instance.name] = idict
8556

    
8557
    return result


class LUSetInstanceParams(LogicalUnit):
8561
  """Modifies an instances's parameters.
8562

8563
  """
8564
  HPATH = "instance-modify"
8565
  HTYPE = constants.HTYPE_INSTANCE
8566
  _OP_PARAMS = [
8567
    _PInstanceName,
8568
    ("nics", _EmptyList, _TList),
8569
    ("disks", _EmptyList, _TList),
8570
    ("beparams", _EmptyDict, _TDict),
8571
    ("hvparams", _EmptyDict, _TDict),
8572
    ("disk_template", None, _TMaybeString),
8573
    ("remote_node", None, _TMaybeString),
8574
    ("os_name", None, _TMaybeString),
8575
    ("force_variant", False, _TBool),
8576
    ("osparams", None, _TOr(_TDict, _TNone)),
8577
    _PForce,
8578
    ]
8579
  REQ_BGL = False
8580

    
8581
  def CheckArguments(self):
8582
    if not (self.op.nics or self.op.disks or self.op.disk_template or
8583
            self.op.hvparams or self.op.beparams or self.op.os_name):
8584
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
8585

    
8586
    if self.op.hvparams:
8587
      _CheckGlobalHvParams(self.op.hvparams)
8588

    
8589
    # Disk validation
8590
    disk_addremove = 0
8591
    for disk_op, disk_dict in self.op.disks:
8592
      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
8593
      if disk_op == constants.DDM_REMOVE:
8594
        disk_addremove += 1
8595
        continue
8596
      elif disk_op == constants.DDM_ADD:
8597
        disk_addremove += 1
8598
      else:
8599
        if not isinstance(disk_op, int):
8600
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
8601
        if not isinstance(disk_dict, dict):
8602
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
8603
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
8604

    
8605
      if disk_op == constants.DDM_ADD:
8606
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
8607
        if mode not in constants.DISK_ACCESS_SET:
8608
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
8609
                                     errors.ECODE_INVAL)
8610
        size = disk_dict.get('size', None)
8611
        if size is None:
8612
          raise errors.OpPrereqError("Required disk parameter size missing",
8613
                                     errors.ECODE_INVAL)
8614
        try:
8615
          size = int(size)
8616
        except (TypeError, ValueError), err:
8617
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
8618
                                     str(err), errors.ECODE_INVAL)
8619
        disk_dict['size'] = size
8620
      else:
8621
        # modification of disk
8622
        if 'size' in disk_dict:
8623
          raise errors.OpPrereqError("Disk size change not possible, use"
8624
                                     " grow-disk", errors.ECODE_INVAL)
8625

    
8626
    if disk_addremove > 1:
8627
      raise errors.OpPrereqError("Only one disk add or remove operation"
8628
                                 " supported at a time", errors.ECODE_INVAL)
8629

    
8630
    if self.op.disks and self.op.disk_template is not None:
8631
      raise errors.OpPrereqError("Disk template conversion and other disk"
8632
                                 " changes not supported at the same time",
8633
                                 errors.ECODE_INVAL)
8634

    
8635
    if self.op.disk_template:
8636
      _CheckDiskTemplate(self.op.disk_template)
8637
      if (self.op.disk_template in constants.DTS_NET_MIRROR and
8638
          self.op.remote_node is None):
8639
        raise errors.OpPrereqError("Changing the disk template to a mirrored"
8640
                                   " one requires specifying a secondary node",
8641
                                   errors.ECODE_INVAL)
8642

    
8643
    # NIC validation
8644
    nic_addremove = 0
8645
    for nic_op, nic_dict in self.op.nics:
8646
      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
8647
      if nic_op == constants.DDM_REMOVE:
8648
        nic_addremove += 1
8649
        continue
8650
      elif nic_op == constants.DDM_ADD:
8651
        nic_addremove += 1
8652
      else:
8653
        if not isinstance(nic_op, int):
8654
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
8655
        if not isinstance(nic_dict, dict):
8656
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
8657
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
8658

    
8659
      # nic_dict should be a dict
8660
      nic_ip = nic_dict.get('ip', None)
8661
      if nic_ip is not None:
8662
        if nic_ip.lower() == constants.VALUE_NONE:
8663
          nic_dict['ip'] = None
8664
        else:
8665
          if not netutils.IsValidIP4(nic_ip):
8666
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
8667
                                       errors.ECODE_INVAL)
8668

    
8669
      nic_bridge = nic_dict.get('bridge', None)
8670
      nic_link = nic_dict.get('link', None)
8671
      if nic_bridge and nic_link:
8672
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
8673
                                   " at the same time", errors.ECODE_INVAL)
8674
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
8675
        nic_dict['bridge'] = None
8676
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
8677
        nic_dict['link'] = None
8678

    
8679
      if nic_op == constants.DDM_ADD:
8680
        nic_mac = nic_dict.get('mac', None)
8681
        if nic_mac is None:
8682
          nic_dict['mac'] = constants.VALUE_AUTO
8683

    
8684
      if 'mac' in nic_dict:
8685
        nic_mac = nic_dict['mac']
8686
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8687
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
8688

    
8689
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
8690
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
8691
                                     " modifying an existing nic",
8692
                                     errors.ECODE_INVAL)
8693

    
8694
    if nic_addremove > 1:
8695
      raise errors.OpPrereqError("Only one NIC add or remove operation"
8696
                                 " supported at a time", errors.ECODE_INVAL)
8697

    
8698
  def ExpandNames(self):
8699
    self._ExpandAndLockInstance()
8700
    self.needed_locks[locking.LEVEL_NODE] = []
8701
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8702

    
8703
  def DeclareLocks(self, level):
8704
    if level == locking.LEVEL_NODE:
8705
      self._LockInstancesNodes()
8706
      if self.op.disk_template and self.op.remote_node:
8707
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8708
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
8709

    
8710
  def BuildHooksEnv(self):
8711
    """Build hooks env.
8712

8713
    This runs on the master, primary and secondaries.
8714

8715
    """
8716
    args = dict()
8717
    if constants.BE_MEMORY in self.be_new:
8718
      args['memory'] = self.be_new[constants.BE_MEMORY]
8719
    if constants.BE_VCPUS in self.be_new:
8720
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
8721
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
8722
    # information at all.
8723
    if self.op.nics:
8724
      args['nics'] = []
8725
      nic_override = dict(self.op.nics)
8726
      for idx, nic in enumerate(self.instance.nics):
8727
        if idx in nic_override:
8728
          this_nic_override = nic_override[idx]
8729
        else:
8730
          this_nic_override = {}
8731
        if 'ip' in this_nic_override:
8732
          ip = this_nic_override['ip']
8733
        else:
8734
          ip = nic.ip
8735
        if 'mac' in this_nic_override:
8736
          mac = this_nic_override['mac']
8737
        else:
8738
          mac = nic.mac
8739
        if idx in self.nic_pnew:
8740
          nicparams = self.nic_pnew[idx]
8741
        else:
8742
          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
8743
        mode = nicparams[constants.NIC_MODE]
8744
        link = nicparams[constants.NIC_LINK]
8745
        args['nics'].append((ip, mac, mode, link))
8746
      if constants.DDM_ADD in nic_override:
8747
        ip = nic_override[constants.DDM_ADD].get('ip', None)
8748
        mac = nic_override[constants.DDM_ADD]['mac']
8749
        nicparams = self.nic_pnew[constants.DDM_ADD]
8750
        mode = nicparams[constants.NIC_MODE]
8751
        link = nicparams[constants.NIC_LINK]
8752
        args['nics'].append((ip, mac, mode, link))
8753
      elif constants.DDM_REMOVE in nic_override:
8754
        del args['nics'][-1]
8755

    
8756
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
8757
    if self.op.disk_template:
8758
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
8759
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
8760
    return env, nl, nl
8761

    
8762
  def CheckPrereq(self):
8763
    """Check prerequisites.
8764

8765
    This only checks the instance list against the existing names.
8766

8767
    """
8768
    # checking the new params on the primary/secondary nodes
8769

    
8770
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8771
    cluster = self.cluster = self.cfg.GetClusterInfo()
8772
    assert self.instance is not None, \
8773
      "Cannot retrieve locked instance %s" % self.op.instance_name
8774
    pnode = instance.primary_node
8775
    nodelist = list(instance.all_nodes)
8776

    
8777
    # OS change
8778
    if self.op.os_name and not self.op.force:
8779
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
8780
                      self.op.force_variant)
8781
      instance_os = self.op.os_name
8782
    else:
8783
      instance_os = instance.os
8784

    
8785
    if self.op.disk_template:
8786
      if instance.disk_template == self.op.disk_template:
8787
        raise errors.OpPrereqError("Instance already has disk template %s" %
8788
                                   instance.disk_template, errors.ECODE_INVAL)
8789

    
8790
      if (instance.disk_template,
8791
          self.op.disk_template) not in self._DISK_CONVERSIONS:
8792
        raise errors.OpPrereqError("Unsupported disk template conversion from"
8793
                                   " %s to %s" % (instance.disk_template,
8794
                                                  self.op.disk_template),
8795
                                   errors.ECODE_INVAL)
8796
      _CheckInstanceDown(self, instance, "cannot change disk template")
8797
      if self.op.disk_template in constants.DTS_NET_MIRROR:
8798
        if self.op.remote_node == pnode:
8799
          raise errors.OpPrereqError("Given new secondary node %s is the same"
8800
                                     " as the primary node of the instance" %
8801
                                     self.op.remote_node, errors.ECODE_STATE)
8802
        _CheckNodeOnline(self, self.op.remote_node)
8803
        _CheckNodeNotDrained(self, self.op.remote_node)
8804
        disks = [{"size": d.size} for d in instance.disks]
8805
        required = _ComputeDiskSize(self.op.disk_template, disks)
8806
        _CheckNodesFreeDisk(self, [self.op.remote_node], required)
8807

    
8808
    # hvparams processing
8809
    if self.op.hvparams:
8810
      hv_type = instance.hypervisor
8811
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
8812
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
8813
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
8814

    
8815
      # local check
8816
      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
8817
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
8818
      self.hv_new = hv_new # the new actual values
8819
      self.hv_inst = i_hvdict # the new dict (without defaults)
8820
    else:
8821
      self.hv_new = self.hv_inst = {}
8822

    
8823
    # beparams processing
8824
    if self.op.beparams:
8825
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
8826
                                   use_none=True)
8827
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
8828
      be_new = cluster.SimpleFillBE(i_bedict)
8829
      self.be_new = be_new # the new actual values
8830
      self.be_inst = i_bedict # the new dict (without defaults)
8831
    else:
8832
      self.be_new = self.be_inst = {}
8833

    
8834
    # osparams processing
8835
    if self.op.osparams:
8836
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
8837
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
8838
      self.os_new = cluster.SimpleFillOS(instance_os, i_osdict)
8839
      self.os_inst = i_osdict # the new dict (without defaults)
8840
    else:
8841
      self.os_new = self.os_inst = {}
8842

    
8843
    self.warn = []
8844

    
8845
    if constants.BE_MEMORY in self.op.beparams and not self.op.force:
8846
      mem_check_list = [pnode]
8847
      if be_new[constants.BE_AUTO_BALANCE]:
8848
        # either we changed auto_balance to yes or it was from before
8849
        mem_check_list.extend(instance.secondary_nodes)
8850
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
8851
                                                  instance.hypervisor)
8852
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
8853
                                         instance.hypervisor)
8854
      pninfo = nodeinfo[pnode]
8855
      msg = pninfo.fail_msg
8856
      if msg:
8857
        # Assume the primary node is unreachable and go ahead
8858
        self.warn.append("Can't get info from primary node %s: %s" %
8859
                         (pnode,  msg))
8860
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
8861
        self.warn.append("Node data from primary node %s doesn't contain"
8862
                         " free memory information" % pnode)
8863
      elif instance_info.fail_msg:
8864
        self.warn.append("Can't get instance runtime information: %s" %
8865
                        instance_info.fail_msg)
8866
      else:
8867
        if instance_info.payload:
8868
          current_mem = int(instance_info.payload['memory'])
8869
        else:
8870
          # Assume instance not running
8871
          # (there is a slight race condition here, but it's not very probable,
8872
          # and we have no other way to check)
8873
          current_mem = 0
8874
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
8875
                    pninfo.payload['memory_free'])
8876
        if miss_mem > 0:
8877
          raise errors.OpPrereqError("This change will prevent the instance"
8878
                                     " from starting, due to %d MB of memory"
8879
                                     " missing on its primary node" % miss_mem,
8880
                                     errors.ECODE_NORES)
8881

    
8882
      if be_new[constants.BE_AUTO_BALANCE]:
8883
        for node, nres in nodeinfo.items():
8884
          if node not in instance.secondary_nodes:
8885
            continue
8886
          msg = nres.fail_msg
8887
          if msg:
8888
            self.warn.append("Can't get info from secondary node %s: %s" %
8889
                             (node, msg))
8890
          elif not isinstance(nres.payload.get('memory_free', None), int):
8891
            self.warn.append("Secondary node %s didn't return free"
8892
                             " memory information" % node)
8893
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
8894
            self.warn.append("Not enough memory to failover instance to"
8895
                             " secondary node %s" % node)
8896

    
8897
    # NIC processing
8898
    self.nic_pnew = {}
8899
    self.nic_pinst = {}
8900
    for nic_op, nic_dict in self.op.nics:
8901
      if nic_op == constants.DDM_REMOVE:
8902
        if not instance.nics:
8903
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
8904
                                     errors.ECODE_INVAL)
8905
        continue
8906
      if nic_op != constants.DDM_ADD:
8907
        # an existing nic
8908
        if not instance.nics:
8909
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
8910
                                     " no NICs" % nic_op,
8911
                                     errors.ECODE_INVAL)
8912
        if nic_op < 0 or nic_op >= len(instance.nics):
8913
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
8914
                                     " are 0 to %d" %
8915
                                     (nic_op, len(instance.nics) - 1),
8916
                                     errors.ECODE_INVAL)
8917
        old_nic_params = instance.nics[nic_op].nicparams
8918
        old_nic_ip = instance.nics[nic_op].ip
8919
      else:
8920
        old_nic_params = {}
8921
        old_nic_ip = None
8922

    
8923
      update_params_dict = dict([(key, nic_dict[key])
8924
                                 for key in constants.NICS_PARAMETERS
8925
                                 if key in nic_dict])
8926

    
8927
      if 'bridge' in nic_dict:
8928
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
8929

    
8930
      new_nic_params = _GetUpdatedParams(old_nic_params,
8931
                                         update_params_dict)
8932
      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
8933
      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
8934
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
8935
      self.nic_pinst[nic_op] = new_nic_params
8936
      self.nic_pnew[nic_op] = new_filled_nic_params
8937
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
8938

    
8939
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
8940
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
8941
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
8942
        if msg:
8943
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
8944
          if self.op.force:
8945
            self.warn.append(msg)
8946
          else:
8947
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
8948
      if new_nic_mode == constants.NIC_MODE_ROUTED:
8949
        if 'ip' in nic_dict:
8950
          nic_ip = nic_dict['ip']
8951
        else:
8952
          nic_ip = old_nic_ip
8953
        if nic_ip is None:
8954
          raise errors.OpPrereqError('Cannot set the nic ip to None'
8955
                                     ' on a routed nic', errors.ECODE_INVAL)
8956
      if 'mac' in nic_dict:
8957
        nic_mac = nic_dict['mac']
8958
        if nic_mac is None:
8959
          raise errors.OpPrereqError('Cannot set the nic mac to None',
8960
                                     errors.ECODE_INVAL)
8961
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8962
          # otherwise generate the mac
8963
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
8964
        else:
8965
          # or validate/reserve the current one
8966
          try:
8967
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
8968
          except errors.ReservationError:
8969
            raise errors.OpPrereqError("MAC address %s already in use"
8970
                                       " in cluster" % nic_mac,
8971
                                       errors.ECODE_NOTUNIQUE)
8972

    
8973
    # DISK processing
8974
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
8975
      raise errors.OpPrereqError("Disk operations not supported for"
8976
                                 " diskless instances",
8977
                                 errors.ECODE_INVAL)
8978
    for disk_op, _ in self.op.disks:
8979
      if disk_op == constants.DDM_REMOVE:
8980
        if len(instance.disks) == 1:
8981
          raise errors.OpPrereqError("Cannot remove the last disk of"
8982
                                     " an instance", errors.ECODE_INVAL)
8983
        _CheckInstanceDown(self, instance, "cannot remove disks")
8984

    
8985
      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
8987
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
8988
                                   " add more" % constants.MAX_DISKS,
8989
                                   errors.ECODE_STATE)
8990
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
8991
        # an existing disk
8992
        if disk_op < 0 or disk_op >= len(instance.disks):
8993
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
8994
                                     " are 0 to %d" %
8995
                                     (disk_op, len(instance.disks) - 1),
8996
                                     errors.ECODE_INVAL)
8997

    
8998
    return
8999

    
9000
  def _ConvertPlainToDrbd(self, feedback_fn):
9001
    """Converts an instance from plain to drbd.
9002

9003
    """
9004
    feedback_fn("Converting template to drbd")
9005
    instance = self.instance
9006
    pnode = instance.primary_node
9007
    snode = self.op.remote_node
9008

    
9009
    # create a fake disk info for _GenerateDiskTemplate
9010
    disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
9011
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
9012
                                      instance.name, pnode, [snode],
9013
                                      disk_info, None, None, 0)
9014
    info = _GetInstanceInfoText(instance)
9015
    feedback_fn("Creating aditional volumes...")
9016
    # first, create the missing data and meta devices
9017
    for disk in new_disks:
9018
      # unfortunately this is... not too nice
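      # (children[0] is the data LV, already present on the primary as the
      #  current plain disk, so only the meta device is created there; the
      #  secondary needs both)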
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
9020
                            info, True)
9021
      for child in disk.children:
9022
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
9023
    # at this stage, all new LVs have been created, we can rename the
9024
    # old ones
9025
    feedback_fn("Renaming original volumes...")
9026
    rename_list = [(o, n.children[0].logical_id)
9027
                   for (o, n) in zip(instance.disks, new_disks)]
9028
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
9029
    result.Raise("Failed to rename original LVs")
9030

    
9031
    feedback_fn("Initializing DRBD devices...")
9032
    # all child devices are in place, we can now create the DRBD devices
9033
    for disk in new_disks:
9034
      for node in [pnode, snode]:
9035
        f_create = node == pnode
9036
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
9037

    
9038
    # at this point, the instance has been modified
9039
    instance.disk_template = constants.DT_DRBD8
9040
    instance.disks = new_disks
9041
    self.cfg.Update(instance, feedback_fn)
9042

    
9043
    # disks are created, waiting for sync
9044
    disk_abort = not _WaitForSync(self, instance)
9045
    if disk_abort:
9046
      raise errors.OpExecError("There are some degraded disks for"
9047
                               " this instance, please cleanup manually")
9048

    
9049
  def _ConvertDrbdToPlain(self, feedback_fn):
9050
    """Converts an instance from drbd to plain.
9051

9052
    """
9053
    instance = self.instance
9054
    assert len(instance.secondary_nodes) == 1
9055
    pnode = instance.primary_node
9056
    snode = instance.secondary_nodes[0]
9057
    feedback_fn("Converting template to plain")
9058

    
9059
    old_disks = instance.disks
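    # the first child of each DRBD disk is the data LV, which becomes the
    # plain disk; the meta LV (children[1]) is removed further down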
    new_disks = [d.children[0] for d in old_disks]
9061

    
9062
    # copy over size and mode
9063
    for parent, child in zip(old_disks, new_disks):
9064
      child.size = parent.size
9065
      child.mode = parent.mode
9066

    
9067
    # update instance structure
9068
    instance.disks = new_disks
9069
    instance.disk_template = constants.DT_PLAIN
9070
    self.cfg.Update(instance, feedback_fn)
9071

    
9072
    feedback_fn("Removing volumes on the secondary node...")
9073
    for disk in old_disks:
9074
      self.cfg.SetDiskID(disk, snode)
9075
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
9076
      if msg:
9077
        self.LogWarning("Could not remove block device %s on node %s,"
9078
                        " continuing anyway: %s", disk.iv_name, snode, msg)
9079

    
9080
    feedback_fn("Removing unneeded volumes on the primary node...")
9081
    for idx, disk in enumerate(old_disks):
9082
      meta = disk.children[1]
9083
      self.cfg.SetDiskID(meta, pnode)
9084
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
9085
      if msg:
9086
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
9087
                        " continuing anyway: %s", idx, pnode, msg)
9088

    
9089

    
9090
  def Exec(self, feedback_fn):
9091
    """Modifies an instance.
9092

9093
    All parameters take effect only at the next restart of the instance.
9094

9095
    """
9096
    # Process here the warnings from CheckPrereq, as we don't have a
9097
    # feedback_fn there.
9098
    for warn in self.warn:
9099
      feedback_fn("WARNING: %s" % warn)
9100

    
9101
    result = []
9102
    instance = self.instance
9103
    # disk changes
9104
    for disk_op, disk_dict in self.op.disks:
9105
      if disk_op == constants.DDM_REMOVE:
9106
        # remove the last disk
9107
        device = instance.disks.pop()
9108
        device_idx = len(instance.disks)
9109
        for node, disk in device.ComputeNodeTree(instance.primary_node):
9110
          self.cfg.SetDiskID(disk, node)
9111
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
9112
          if msg:
9113
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
9114
                            " continuing anyway", device_idx, node, msg)
9115
        result.append(("disk/%d" % device_idx, "remove"))
9116
      elif disk_op == constants.DDM_ADD:
9117
        # add a new disk
9118
        if instance.disk_template == constants.DT_FILE:
9119
          file_driver, file_path = instance.disks[0].logical_id
9120
          file_path = os.path.dirname(file_path)
9121
        else:
9122
          file_driver = file_path = None
9123
        disk_idx_base = len(instance.disks)
9124
        new_disk = _GenerateDiskTemplate(self,
9125
                                         instance.disk_template,
9126
                                         instance.name, instance.primary_node,
9127
                                         instance.secondary_nodes,
9128
                                         [disk_dict],
9129
                                         file_path,
9130
                                         file_driver,
9131
                                         disk_idx_base)[0]
9132
        instance.disks.append(new_disk)
9133
        info = _GetInstanceInfoText(instance)
9134

    
9135
        logging.info("Creating volume %s for instance %s",
9136
                     new_disk.iv_name, instance.name)
9137
        # Note: this needs to be kept in sync with _CreateDisks
9138
        #HARDCODE
9139
        for node in instance.all_nodes:
9140
          f_create = node == instance.primary_node
9141
          try:
9142
            _CreateBlockDev(self, node, instance, new_disk,
9143
                            f_create, info, f_create)
9144
          except errors.OpExecError, err:
9145
            self.LogWarning("Failed to create volume %s (%s) on"
9146
                            " node %s: %s",
9147
                            new_disk.iv_name, new_disk, node, err)
9148
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
9149
                       (new_disk.size, new_disk.mode)))
9150
      else:
9151
        # change a given disk
9152
        instance.disks[disk_op].mode = disk_dict['mode']
9153
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
9154

    
9155
    if self.op.disk_template:
9156
      r_shut = _ShutdownInstanceDisks(self, instance)
9157
      if not r_shut:
9158
        raise errors.OpExecError("Cannot shutdow instance disks, unable to"
9159
                                 " proceed with disk template conversion")
9160
      mode = (instance.disk_template, self.op.disk_template)
9161
      try:
9162
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
9163
      except:
9164
        self.cfg.ReleaseDRBDMinors(instance.name)
9165
        raise
9166
      result.append(("disk_template", self.op.disk_template))
9167

    
9168
    # NIC changes
9169
    for nic_op, nic_dict in self.op.nics:
9170
      if nic_op == constants.DDM_REMOVE:
9171
        # remove the last nic
9172
        del instance.nics[-1]
9173
        result.append(("nic.%d" % len(instance.nics), "remove"))
9174
      elif nic_op == constants.DDM_ADD:
9175
        # mac and bridge should be set, by now
9176
        mac = nic_dict['mac']
9177
        ip = nic_dict.get('ip', None)
9178
        nicparams = self.nic_pinst[constants.DDM_ADD]
9179
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
9180
        instance.nics.append(new_nic)
9181
        result.append(("nic.%d" % (len(instance.nics) - 1),
9182
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
9183
                       (new_nic.mac, new_nic.ip,
9184
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
9185
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
9186
                       )))
9187
      else:
9188
        for key in 'mac', 'ip':
9189
          if key in nic_dict:
9190
            setattr(instance.nics[nic_op], key, nic_dict[key])
9191
        if nic_op in self.nic_pinst:
9192
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
9193
        for key, val in nic_dict.iteritems():
9194
          result.append(("nic.%s/%d" % (key, nic_op), val))
9195

    
9196
    # hvparams changes
9197
    if self.op.hvparams:
9198
      instance.hvparams = self.hv_inst
9199
      for key, val in self.op.hvparams.iteritems():
9200
        result.append(("hv/%s" % key, val))
9201

    
9202
    # beparams changes
9203
    if self.op.beparams:
9204
      instance.beparams = self.be_inst
9205
      for key, val in self.op.beparams.iteritems():
9206
        result.append(("be/%s" % key, val))
9207

    
9208
    # OS change
9209
    if self.op.os_name:
9210
      instance.os = self.op.os_name
9211

    
9212
    # osparams changes
9213
    if self.op.osparams:
9214
      instance.osparams = self.os_inst
9215
      for key, val in self.op.osparams.iteritems():
9216
        result.append(("os/%s" % key, val))
9217

    
9218
    self.cfg.Update(instance, feedback_fn)
9219

    
9220
    return result

  _DISK_CONVERSIONS = {
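    # maps (current template, requested template) to the conversion method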
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUQueryExports(NoHooksLU):
9229
  """Query the exports list
9230

9231
  """
9232
  _OP_PARAMS = [
9233
    ("nodes", _EmptyList, _TListOf(_TNonEmptyString)),
9234
    ("use_locking", False, _TBool),
9235
    ]
9236
  REQ_BGL = False
9237

    
9238
  def ExpandNames(self):
9239
    self.needed_locks = {}
9240
    self.share_locks[locking.LEVEL_NODE] = 1
9241
    if not self.op.nodes:
9242
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9243
    else:
9244
      self.needed_locks[locking.LEVEL_NODE] = \
9245
        _GetWantedNodes(self, self.op.nodes)
9246

    
9247
  def Exec(self, feedback_fn):
9248
    """Compute the list of all the exported system images.
9249

9250
    @rtype: dict
9251
    @return: a dictionary with the structure node->(export-list)
9252
        where export-list is a list of the instances exported on
9253
        that node.
9254

9255
    """
9256
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
9257
    rpcresult = self.rpc.call_export_list(self.nodes)
9258
    result = {}
9259
    for node in rpcresult:
9260
      if rpcresult[node].fail_msg:
9261
        result[node] = False
9262
      else:
9263
        result[node] = rpcresult[node].payload
9264

    
9265
    return result


class LUPrepareExport(NoHooksLU):
9269
  """Prepares an instance for an export and returns useful information.
9270

9271
  """
9272
  _OP_PARAMS = [
9273
    _PInstanceName,
9274
    ("mode", _NoDefault, _TElemOf(constants.EXPORT_MODES)),
9275
    ]
9276
  REQ_BGL = False
9277

    
9278
  def ExpandNames(self):
9279
    self._ExpandAndLockInstance()
9280

    
9281
  def CheckPrereq(self):
9282
    """Check prerequisites.
9283

9284
    """
9285
    instance_name = self.op.instance_name
9286

    
9287
    self.instance = self.cfg.GetInstanceInfo(instance_name)
9288
    assert self.instance is not None, \
9289
          "Cannot retrieve locked instance %s" % self.op.instance_name
9290
    _CheckNodeOnline(self, self.instance.primary_node)
9291

    
9292
    self._cds = _GetClusterDomainSecret()
9293

    
9294
  def Exec(self, feedback_fn):
9295
    """Prepares an instance for an export.
9296

9297
    """
9298
    instance = self.instance
9299

    
9300
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
9301
      salt = utils.GenerateSecret(8)
9302

    
9303
      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
9304
      result = self.rpc.call_x509_cert_create(instance.primary_node,
9305
                                              constants.RIE_CERT_VALIDITY)
9306
      result.Raise("Can't create X509 key and certificate on %s" % result.node)
9307

    
9308
      (name, cert_pem) = result.payload
9309

    
9310
      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
9311
                                             cert_pem)
9312

    
9313
      return {
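        # the caller is expected to feed these values back into the export
        # opcode (x509_key_name, destination_x509_ca) and the remote import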
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
9315
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
9316
                          salt),
9317
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
9318
        }
9319

    
9320
    return None


class LUExportInstance(LogicalUnit):
9324
  """Export an instance to an image in the cluster.
9325

9326
  """
9327
  HPATH = "instance-export"
9328
  HTYPE = constants.HTYPE_INSTANCE
9329
  _OP_PARAMS = [
9330
    _PInstanceName,
9331
    ("target_node", _NoDefault, _TOr(_TNonEmptyString, _TList)),
9332
    ("shutdown", True, _TBool),
9333
    _PShutdownTimeout,
9334
    ("remove_instance", False, _TBool),
9335
    ("ignore_remove_failures", False, _TBool),
9336
    ("mode", constants.EXPORT_MODE_LOCAL, _TElemOf(constants.EXPORT_MODES)),
9337
    ("x509_key_name", None, _TOr(_TList, _TNone)),
9338
    ("destination_x509_ca", None, _TMaybeString),
9339
    ]
9340
  REQ_BGL = False
9341

    
9342
  def CheckArguments(self):
9343
    """Check the arguments.
9344

9345
    """
9346
    self.x509_key_name = self.op.x509_key_name
9347
    self.dest_x509_ca_pem = self.op.destination_x509_ca
9348

    
9349
    if self.op.remove_instance and not self.op.shutdown:
9350
      raise errors.OpPrereqError("Can not remove instance without shutting it"
9351
                                 " down before")
9352

    
9353
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
9354
      if not self.x509_key_name:
9355
        raise errors.OpPrereqError("Missing X509 key name for encryption",
9356
                                   errors.ECODE_INVAL)
9357

    
9358
      if not self.dest_x509_ca_pem:
9359
        raise errors.OpPrereqError("Missing destination X509 CA",
9360
                                   errors.ECODE_INVAL)
9361

    
9362
  def ExpandNames(self):
9363
    self._ExpandAndLockInstance()
9364

    
9365
    # Lock all nodes for local exports
9366
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
9367
      # FIXME: lock only instance primary and destination node
9368
      #
9369
      # Sad but true, for now we have to lock all nodes, as we don't know where
9370
      # the previous export might be, and in this LU we search for it and
9371
      # remove it from its current node. In the future we could fix this by:
9372
      #  - making a tasklet to search (share-lock all), then create the
9373
      #    new one, then one to remove, after
9374
      #  - removing the removal operation altogether
9375
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9376

    
9377
  def DeclareLocks(self, level):
9378
    """Last minute lock declaration."""
9379
    # All nodes are locked anyway, so nothing to do here.
9380

    
9381
  def BuildHooksEnv(self):
9382
    """Build hooks env.
9383

9384
    This will run on the master, primary node and target node.
9385

9386
    """
9387
    env = {
9388
      "EXPORT_MODE": self.op.mode,
9389
      "EXPORT_NODE": self.op.target_node,
9390
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
9391
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
9392
      # TODO: Generic function for boolean env variables
9393
      "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
9394
      }
9395

    
9396
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
9397

    
9398
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
9399

    
9400
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
9401
      nl.append(self.op.target_node)
9402

    
9403
    return env, nl, nl
9404

    
9405
  def CheckPrereq(self):
9406
    """Check prerequisites.
9407

9408
    This checks that the instance and node names are valid.
9409

9410
    """
9411
    instance_name = self.op.instance_name
9412

    
9413
    self.instance = self.cfg.GetInstanceInfo(instance_name)
9414
    assert self.instance is not None, \
9415
          "Cannot retrieve locked instance %s" % self.op.instance_name
9416
    _CheckNodeOnline(self, self.instance.primary_node)
9417

    
9418
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
9419
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
9420
      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
9421
      assert self.dst_node is not None
9422

    
9423
      _CheckNodeOnline(self, self.dst_node.name)
9424
      _CheckNodeNotDrained(self, self.dst_node.name)
9425

    
9426
      self._cds = None
9427
      self.dest_disk_info = None
9428
      self.dest_x509_ca = None
9429

    
9430
    elif self.op.mode == constants.EXPORT_MODE_REMOTE:
9431
      self.dst_node = None
9432

    
9433
      if len(self.op.target_node) != len(self.instance.disks):
9434
        raise errors.OpPrereqError(("Received destination information for %s"
9435
                                    " disks, but instance %s has %s disks") %
9436
                                   (len(self.op.target_node), instance_name,
9437
                                    len(self.instance.disks)),
9438
                                   errors.ECODE_INVAL)
9439

    
9440
      cds = _GetClusterDomainSecret()
9441

    
9442
      # Check X509 key name
9443
      try:
9444
        (key_name, hmac_digest, hmac_salt) = self.x509_key_name
9445
      except (TypeError, ValueError), err:
9446
        raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
9447

    
9448
      if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
9449
        raise errors.OpPrereqError("HMAC for X509 key name is wrong",
9450
                                   errors.ECODE_INVAL)
9451

    
9452
      # Load and verify CA
9453
      try:
9454
        (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
9455
      except OpenSSL.crypto.Error, err:
9456
        raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
9457
                                   (err, ), errors.ECODE_INVAL)
9458

    
9459
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
9460
      if errcode is not None:
9461
        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
9462
                                   (msg, ), errors.ECODE_INVAL)
9463

    
9464
      self.dest_x509_ca = cert
9465

    
9466
      # Verify target information
9467
      disk_info = []
9468
      for idx, disk_data in enumerate(self.op.target_node):
9469
        try:
9470
          (host, port, magic) = \
9471
            masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
9472
        except errors.GenericError, err:
9473
          raise errors.OpPrereqError("Target info for disk %s: %s" %
9474
                                     (idx, err), errors.ECODE_INVAL)
9475

    
9476
        disk_info.append((host, port, magic))
9477

    
9478
      assert len(disk_info) == len(self.op.target_node)
9479
      self.dest_disk_info = disk_info
9480

    
9481
    else:
9482
      raise errors.ProgrammerError("Unhandled export mode %r" %
9483
                                   self.op.mode)
9484

    
9485
    # instance disk type verification
9486
    # TODO: Implement export support for file-based disks
9487
    for disk in self.instance.disks:
9488
      if disk.dev_type == constants.LD_FILE:
9489
        raise errors.OpPrereqError("Export not supported for instances with"
9490
                                   " file-based disks", errors.ECODE_INVAL)
9491

    
9492
  def _CleanupExports(self, feedback_fn):
9493
    """Removes exports of current instance from all other nodes.
9494

9495
    If an instance in a cluster with nodes A..D was exported to node C, its
9496
    exports will be removed from the nodes A, B and D.
9497

9498
    """
9499
    assert self.op.mode != constants.EXPORT_MODE_REMOTE
9500

    
9501
    nodelist = self.cfg.GetNodeList()
9502
    nodelist.remove(self.dst_node.name)
9503

    
9504
    # on one-node clusters nodelist will be empty after the removal
9505
    # if we proceed the backup would be removed because OpQueryExports
9506
    # substitutes an empty list with the full cluster node list.
9507
    iname = self.instance.name
9508
    if nodelist:
9509
      feedback_fn("Removing old exports for instance %s" % iname)
9510
      exportlist = self.rpc.call_export_list(nodelist)
9511
      for node in exportlist:
9512
        if exportlist[node].fail_msg:
9513
          continue
9514
        if iname in exportlist[node].payload:
9515
          msg = self.rpc.call_export_remove(node, iname).fail_msg
9516
          if msg:
9517
            self.LogWarning("Could not remove older export for instance %s"
9518
                            " on node %s: %s", iname, node, msg)
9519

    
9520
  def Exec(self, feedback_fn):
9521
    """Export an instance to an image in the cluster.
9522

9523
    """
9524
    assert self.op.mode in constants.EXPORT_MODES
9525

    
9526
    instance = self.instance
9527
    src_node = instance.primary_node
9528

    
9529
    if self.op.shutdown:
9530
      # shutdown the instance, but not the disks
9531
      feedback_fn("Shutting down instance %s" % instance.name)
9532
      result = self.rpc.call_instance_shutdown(src_node, instance,
9533
                                               self.op.shutdown_timeout)
9534
      # TODO: Maybe ignore failures if ignore_remove_failures is set
9535
      result.Raise("Could not shutdown instance %s on"
9536
                   " node %s" % (instance.name, src_node))
9537

    
9538
    # set the disks ID correctly since call_instance_start needs the
9539
    # correct drbd minor to create the symlinks
9540
    for disk in instance.disks:
9541
      self.cfg.SetDiskID(disk, src_node)
9542

    
9543
    activate_disks = (not instance.admin_up)
9544

    
9545
    if activate_disks:
9546
      # Activate the instance disks if we're exporting a stopped instance
9547
      feedback_fn("Activating disks for %s" % instance.name)
9548
      _StartInstanceDisks(self, instance, None)
9549

    
9550
    try:
9551
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
9552
                                                     instance)
9553

    
9554
      helper.CreateSnapshots()
9555
      try:
9556
        if (self.op.shutdown and instance.admin_up and
9557
            not self.op.remove_instance):
9558
          assert not activate_disks
9559
          feedback_fn("Starting instance %s" % instance.name)
9560
          result = self.rpc.call_instance_start(src_node, instance, None, None)
9561
          msg = result.fail_msg
9562
          if msg:
9563
            feedback_fn("Failed to start instance: %s" % msg)
9564
            _ShutdownInstanceDisks(self, instance)
9565
            raise errors.OpExecError("Could not start instance: %s" % msg)
9566

    
9567
        if self.op.mode == constants.EXPORT_MODE_LOCAL:
9568
          (fin_resu, dresults) = helper.LocalExport(self.dst_node)
9569
        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
9570
          connect_timeout = constants.RIE_CONNECT_TIMEOUT
9571
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
9572

    
9573
          (key_name, _, _) = self.x509_key_name
9574

    
9575
          dest_ca_pem = \
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                            self.dest_x509_ca)

          (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
                                                     key_name, dest_ca_pem,
                                                     timeouts)
      finally:
        helper.Cleanup()

      # Check for backwards compatibility
      assert len(dresults) == len(instance.disks)
      assert compat.all(isinstance(i, bool) for i in dresults), \
             "Not all results are boolean: %r" % dresults

    finally:
      if activate_disks:
        feedback_fn("Deactivating disks for %s" % instance.name)
        _ShutdownInstanceDisks(self, instance)

    if not (compat.all(dresults) and fin_resu):
      failures = []
      if not fin_resu:
        failures.append("export finalization")
      if not compat.all(dresults):
        fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
                               if not dsk)
        failures.append("disk export: disk(s) %s" % fdsk)

      raise errors.OpExecError("Export failed, errors in %s" %
                               utils.CommaJoin(failures))

    # At this point, the export was successful, we can cleanup/finish

    # Remove instance if requested
    if self.op.remove_instance:
      feedback_fn("Removing instance %s" % instance.name)
      _RemoveInstance(self, feedback_fn, instance,
                      self.op.ignore_remove_failures)

    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      self._CleanupExports(feedback_fn)

    return fin_resu, dresults


class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_PARAMS = [
    _PInstanceName,
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = False
    if not instance_name:
      fqdn_warn = True
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
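    # exportlist maps each locked node name to an RPC result whose payload is
    # the list of export names found on that node.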
    found = False
    for node in exportlist:
      msg = exportlist[node].fail_msg
      if msg:
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
        continue
      if instance_name in exportlist[node].payload:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        msg = result.fail_msg
        if msg:
          logging.error("Could not remove export for instance %s"
                        " on node %s: %s", instance_name, node, msg)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")


class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
    elif self.op.kind == constants.TAG_INSTANCE:
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind), errors.ECODE_INVAL)


class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_PARAMS = [
    ("kind", _NoDefault, _TElemOf(constants.VALID_TAG_TYPES)),
    ("name", _NoDefault, _TNonEmptyString),
    ]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUSearchTags(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  _OP_PARAMS = [
    ("pattern", _NoDefault, _TNonEmptyString),
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err), errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    cfg = self.cfg
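    # Search the cluster object itself, then every instance and every node.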
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results


class LUAddTags(TagsLU):
  """Sets a tag on a given object.

  """
  _OP_PARAMS = [
    ("kind", _NoDefault, _TElemOf(constants.VALID_TAG_TYPES)),
    ("name", _NoDefault, _TNonEmptyString),
    ("tags", _NoDefault, _TListOf(_TNonEmptyString)),
    ]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    self.cfg.Update(self.target, feedback_fn)


class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_PARAMS = [
    ("kind", _NoDefault, _TElemOf(constants.VALID_TAG_TYPES)),
    ("name", _NoDefault, _TNonEmptyString),
    ("tags", _NoDefault, _TListOf(_TNonEmptyString)),
    ]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
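    # Every requested tag must currently be present on the object; otherwise
    # abort before removing anything.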
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)), errors.ECODE_NOENT)

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    self.cfg.Update(self.target, feedback_fn)


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_PARAMS = [
    ("duration", _NoDefault, _TFloat),
    ("on_master", True, _TBool),
    ("on_nodes", _EmptyList, _TListOf(_TNonEmptyString)),
    ("repeat", 0, _TPositiveInt)
    ]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def _TestDelay(self):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      for node, node_result in result.items():
        node_result.Raise("Failure during rpc call to node %s" % node)

  def Exec(self, feedback_fn):
    """Execute the test delay opcode, with the wanted repetitions.

    """
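    # A repeat count of 0 still performs a single delay; repeat == N > 0
    # performs the delay N times.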
    if self.op.repeat == 0:
      self._TestDelay()
    else:
      top_value = self.op.repeat - 1
      for i in range(self.op.repeat):
        self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
        self._TestDelay()


class LUTestJobqueue(NoHooksLU):
  """Utility LU to test some aspects of the job queue.

  """
  _OP_PARAMS = [
    ("notify_waitlock", False, _TBool),
    ("notify_exec", False, _TBool),
    ("log_messages", _EmptyList, _TListOf(_TString)),
    ("fail", False, _TBool),
    ]
  REQ_BGL = False

  # Must be lower than default timeout for WaitForJobChange to see whether it
  # notices changed jobs
  _CLIENT_CONNECT_TIMEOUT = 20.0
  _CLIENT_CONFIRM_TIMEOUT = 60.0

  @classmethod
  def _NotifyUsingSocket(cls, cb, errcls):
    """Opens a Unix socket and waits for another program to connect.

    @type cb: callable
    @param cb: Callback to send socket name to client
    @type errcls: class
    @param errcls: Exception class to use for errors

    """
    # Using a temporary directory as there's no easy way to create temporary
    # sockets without writing a custom loop around tempfile.mktemp and
    # socket.bind
    tmpdir = tempfile.mkdtemp()
    try:
      tmpsock = utils.PathJoin(tmpdir, "sock")

      logging.debug("Creating temporary socket at %s", tmpsock)
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
      try:
        sock.bind(tmpsock)
        sock.listen(1)

        # Send details to client
        cb(tmpsock)

        # Wait for client to connect before continuing
        sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
        try:
          (conn, _) = sock.accept()
        except socket.error, err:
          raise errcls("Client didn't connect in time (%s)" % err)
      finally:
        sock.close()
    finally:
      # Remove as soon as client is connected
      shutil.rmtree(tmpdir)

    # Wait for client to close
    try:
      try:
        # pylint: disable-msg=E1101
        # Instance of '_socketobject' has no ... member
        conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
        conn.recv(1)
      except socket.error, err:
        raise errcls("Client failed to confirm notification (%s)" % err)
    finally:
      conn.close()

  def _SendNotification(self, test, arg, sockname):
    """Sends a notification to the client.

    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)
    @type sockname: string
    @param sockname: Socket path

    """
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))

  def _Notify(self, prereq, test, arg):
    """Notifies the client of a test.

    @type prereq: bool
    @param prereq: Whether this is a prereq-phase test
    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)

    """
    if prereq:
      errcls = errors.OpPrereqError
    else:
      errcls = errors.OpExecError

    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
                                                  test, arg),
                                   errcls)

  def CheckArguments(self):
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
    self.expandnames_calls = 0

  def ExpandNames(self):
    checkargs_calls = getattr(self, "checkargs_calls", 0)
    if checkargs_calls < 1:
      raise errors.ProgrammerError("CheckArguments was not called")

    self.expandnames_calls += 1

    if self.op.notify_waitlock:
      self._Notify(True, constants.JQT_EXPANDNAMES, None)

    self.LogInfo("Expanding names")

    # Get lock on master node (just to get a lock, not for a particular reason)
    self.needed_locks = {
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
      }

  def Exec(self, feedback_fn):
    if self.expandnames_calls < 1:
      raise errors.ProgrammerError("ExpandNames was not called")

    if self.op.notify_exec:
      self._Notify(False, constants.JQT_EXEC, None)

    self.LogInfo("Executing")

    if self.op.log_messages:
      for idx, msg in enumerate(self.op.log_messages):
        self.LogInfo("Sending log message %s", idx + 1)
        feedback_fn(constants.JQT_MSGPREFIX + msg)
        # Report how many test messages have been sent
        self._Notify(False, constants.JQT_LOGMSG, idx + 1)

    if self.op.fail:
      raise errors.OpExecError("Opcode failure was requested")

    return True


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, result) for
      easy usage

  """
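  # Typical usage (see LUTestAllocator.Exec below): build an instance with
  # mode-specific keyword arguments, run the external script, then inspect
  # the validated result; an illustrative sketch for the relocation mode:
  #   ial = IAllocator(self.cfg, self.rpc,
  #                    mode=constants.IALLOCATOR_MODE_RELOC,
  #                    name=instance_name, relocate_from=secondary_nodes)
  #   ial.Run(allocator_name)
  #   if not ial.success:
  #     ...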
  # pylint: disable-msg=R0902
  # lots of instance attributes
  _ALLO_KEYS = [
    "name", "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "name", "relocate_from",
    ]
  _EVAC_KEYS = [
    "evac_nodes",
    ]

  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.evac_nodes = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
      fn = self._AddNewInstance
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
      fn = self._AddRelocateInstance
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      keyset = self._EVAC_KEYS
      fn = self._AddEvacuateNodes
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData(fn)

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

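      # Dynamic data (free memory, disk space and CPU counts) is gathered via
      # RPC only for nodes that are online and not drained; other nodes keep
      # just the static configuration data above.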
      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

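    # Network-mirrored disk templates (e.g. DRBD) need both a primary and a
    # secondary node; all other templates need only one node.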
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    return request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                 errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddEvacuateNodes(self):
    """Add evacuate nodes data to allocator structure.

    """
    request = {
      "evac_nodes": self.evac_nodes
      }
    return request

  def _BuildInputData(self, fn):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

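    # The external allocator script is always executed on the master node.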
    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["result"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
                               " is not a list")
    self.out_data = rdict


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  _OP_PARAMS = [
    ("direction", _NoDefault, _TElemOf(constants.VALID_IALLOCATOR_DIRECTIONS)),
    ("mode", _NoDefault, _TElemOf(constants.VALID_IALLOCATOR_MODES)),
    ("name", _NoDefault, _TNonEmptyString),
    ("nics", _NoDefault, _TOr(_TNone, _TListOf(
      _TDictOf(_TElemOf(["mac", "ip", "bridge"]),
               _TOr(_TNone, _TNonEmptyString))))),
    ("disks", _NoDefault, _TOr(_TNone, _TList)),
    ("hypervisor", None, _TMaybeString),
    ("allocator", None, _TMaybeString),
    ("tags", _EmptyList, _TListOf(_TNonEmptyString)),
    ("mem_size", None, _TOr(_TNone, _TPositiveInt)),
    ("vcpus", None, _TOr(_TNone, _TPositiveInt)),
    ("os", None, _TMaybeString),
    ("disk_template", None, _TMaybeString),
    ("evac_nodes", None, _TOr(_TNone, _TListOf(_TNonEmptyString))),
    ]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Unhandled mode %s in"
                                   " LUTestAllocator.Exec" % self.op.mode)

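    # For the "in" direction only the generated allocator input is returned;
    # for "out" the named allocator is actually run and its raw (unvalidated)
    # output is returned.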
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result