root / lib / cmdlib.py @ 8c35561f


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201,C0302

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

# C0302: since we have way too many lines in this module

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL
import socket
import tempfile
import shutil

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd
from ganeti import netutils

import ganeti.masterd.instance # pylint: disable-msg=W0611


# Modifiable default values; need to define these here before the
# actual LUs

def _EmptyList():
  """Returns an empty list.

  """
  return []


def _EmptyDict():
  """Returns an empty dict.

  """
  return {}


#: The without-default default value
_NoDefault = object()


#: The no-type (value too complex to check in the type system)
_NoType = object()


# Some basic types
def _TNotNone(val):
  """Checks if the given value is not None.

  """
  return val is not None


def _TNone(val):
  """Checks if the given value is None.

  """
  return val is None


def _TBool(val):
  """Checks if the given value is a boolean.

  """
  return isinstance(val, bool)


def _TInt(val):
  """Checks if the given value is an integer.

  """
  return isinstance(val, int)


def _TFloat(val):
  """Checks if the given value is a float.

  """
  return isinstance(val, float)


def _TString(val):
  """Checks if the given value is a string.

  """
  return isinstance(val, basestring)


def _TTrue(val):
  """Checks if a given value evaluates to a boolean True value.

  """
  return bool(val)


def _TElemOf(target_list):
  """Builds a function that checks if a given value is a member of a list.

  """
  return lambda val: val in target_list


# Container types
def _TList(val):
  """Checks if the given value is a list.

  """
  return isinstance(val, list)


def _TDict(val):
  """Checks if the given value is a dictionary.

  """
  return isinstance(val, dict)


# Combinator types
def _TAnd(*args):
  """Combine multiple functions using an AND operation.

  """
  def fn(val):
    return compat.all(t(val) for t in args)
  return fn


def _TOr(*args):
  """Combine multiple functions using an OR operation.

  """
  def fn(val):
    return compat.any(t(val) for t in args)
  return fn


# Type aliases

#: a non-empty string
_TNonEmptyString = _TAnd(_TString, _TTrue)


#: a maybe non-empty string
_TMaybeString = _TOr(_TNonEmptyString, _TNone)


#: a maybe boolean (bool or none)
_TMaybeBool = _TOr(_TBool, _TNone)


#: a positive integer
_TPositiveInt = _TAnd(_TInt, lambda v: v >= 0)

#: a strictly positive integer
_TStrictPositiveInt = _TAnd(_TInt, lambda v: v > 0)


def _TListOf(my_type):
  """Checks if a given value is a list with all elements of the same type.

  """
  return _TAnd(_TList,
               lambda lst: compat.all(my_type(v) for v in lst))


def _TDictOf(key_type, val_type):
  """Checks a dict type for the type of its key/values.

  """
  return _TAnd(_TDict,
               lambda my_dict: (compat.all(key_type(v) for v in my_dict.keys())
                                and compat.all(val_type(v)
                                               for v in my_dict.values())))

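# Illustrative usage (not part of the original module; the names below are
# hypothetical, shown only to demonstrate how the combinators compose):
#
#   _TExamplePorts = _TListOf(_TAnd(_TInt, lambda v: 0 < v < 65536))
#   _TExamplePorts([22, 1811])                                   # -> True
#   _TExamplePorts([22, "ssh"])                                  # -> False
#   _TDictOf(_TNonEmptyString, _TMaybeBool)({"drained": None})   # -> True

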
# Common opcode attributes

#: output fields for a query operation
_POutputFields = ("output_fields", _NoDefault, _TListOf(_TNonEmptyString))


#: the shutdown timeout
_PShutdownTimeout = ("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT,
                     _TPositiveInt)

#: the force parameter
_PForce = ("force", False, _TBool)

#: a required instance name (for single-instance LUs)
_PInstanceName = ("instance_name", _NoDefault, _TNonEmptyString)


#: a required node name (for single-node LUs)
_PNodeName = ("node_name", _NoDefault, _TNonEmptyString)

#: the migration type (live/non-live)
_PMigrationMode = ("mode", None, _TOr(_TNone,
                                      _TElemOf(constants.HT_MIGRATION_MODES)))

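# Illustrative example (hypothetical LU parameter list, not part of the
# original module): an LU reuses these tuples and adds its own entries in
# _OP_PARAMS, e.g.
#
#   _OP_PARAMS = [
#     _PInstanceName,
#     _PForce,
#     ("ignore_secondaries", False, _TBool),
#     ]
#
# Each entry is (attribute name, default or _NoDefault, type check); the
# validation loop in LogicalUnit.__init__ below fills in defaults and runs
# the checks.

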
# End types
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)
  @cvar _OP_PARAMS: a list of opcode attributes, their default values
      they should get if not already defined, and types they must match

  """
  HPATH = None
  HTYPE = None
  _OP_PARAMS = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.Log = processor.Log # pylint: disable-msg=C0103
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    # The new kind-of-type-system
    op_id = self.op.OP_ID
    for attr_name, aval, test in self._OP_PARAMS:
      if not hasattr(op, attr_name):
        if aval == _NoDefault:
          raise errors.OpPrereqError("Required parameter '%s.%s' missing" %
                                     (op_id, attr_name), errors.ECODE_INVAL)
        else:
          if callable(aval):
            dval = aval()
          else:
            dval = aval
          setattr(self.op, attr_name, dval)
      attr_val = getattr(op, attr_name)
      if test == _NoType:
        # no tests here
        continue
      if not callable(test):
        raise errors.ProgrammerError("Validation for parameter '%s.%s' failed,"
                                     " given type is not a proper type (%s)" %
                                     (op_id, attr_name, test))
      if not test(attr_val):
        logging.error("OpCode %s, parameter %s, has invalid type %s/value %s",
                      self.op.OP_ID, attr_name, type(attr_val), attr_val)
        raise errors.OpPrereqError("Parameter '%s.%s' fails validation" %
                                   (op_id, attr_name), errors.ECODE_INVAL)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      pass

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have the 'GANETI_' prefix, as this is
    handled in the hooks runner. Also note that additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes, an empty list (and not None) should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused-argument and
    # could-be-a-function warnings
    # pylint: disable-msg=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    assert False, "BuildHooksEnv called for NoHooksLUs"


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    pass

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy


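# Illustrative example (hypothetical values, not part of the original module):
# with use_default=True a constants.VALUE_DEFAULT entry drops the key so the
# cluster default applies again:
#
#   _GetUpdatedParams({"kernel_path": "/boot/vmlinuz", "root_path": "/dev/sda"},
#                     {"kernel_path": constants.VALUE_DEFAULT,
#                      "serial_console": True})
#   # -> {"root_path": "/dev/sda", "serial_console": True}

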
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _RequireFileStorage():
  """Checks that file storage is enabled.

  @raise errors.OpPrereqError: when file storage is disabled

  """
  if not constants.ENABLE_FILE_STORAGE:
    raise errors.OpPrereqError("File storage disabled at configure time",
                               errors.ECODE_INVAL)


def _CheckDiskTemplate(template):
  """Ensure a given disk template is valid.

  """
  if template not in constants.DISK_TEMPLATES:
    msg = ("Invalid disk template name '%s', valid templates are: %s" %
           (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
  if template == constants.DT_FILE:
    _RequireFileStorage()
  return True


def _CheckStorageType(storage_type):
  """Ensure a given storage type is valid.

  """
  if storage_type not in constants.VALID_STORAGE_TYPES:
    raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
                               errors.ECODE_INVAL)
  if storage_type == constants.ST_FILE:
    _RequireFileStorage()
  return True


def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env


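# Illustrative example (hypothetical values, not part of the original module):
# for an instance with one bridged NIC and one disk the dict returned by
# _BuildInstanceHookEnv above would contain entries such as
#
#   INSTANCE_NAME=inst1.example.com    INSTANCE_PRIMARY=node1.example.com
#   INSTANCE_STATUS=up                 INSTANCE_NIC_COUNT=1
#   INSTANCE_NIC0_MODE=bridged         INSTANCE_NIC0_BRIDGE=xen-br0
#   INSTANCE_DISK_COUNT=1              INSTANCE_DISK0_SIZE=10240
#
# The hooks runner later prefixes each key with "GANETI_".

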
def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  cluster = lu.cfg.GetClusterInfo()
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max by one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


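# Illustrative example (hypothetical numbers, not part of the original
# module): with candidate_pool_size=10, mc_now=4 and mc_should=5,
# _DecideSelfPromotion computes mc_should = min(5 + 1, 10) = 6, and since
# 4 < 6 the new node promotes itself to master candidate.

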
def _CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  try:
    variant = name.split("+", 1)[1]
  except IndexError:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


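# Illustrative example (hypothetical OS name, not part of the original
# module): _CheckOSVariant splits on "+", so "debootstrap+squeeze" yields the
# variant "squeeze", which must be listed in os_obj.supported_variants.

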
def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, then the LU's opcode's iallocator slot is filled with the
  cluster-wide default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  iallocator = getattr(lu.op, iallocator_slot, None)

  if node is not None and iallocator is not None:
    raise errors.OpPrereqError("Do not specify both, iallocator and node.",
                               errors.ECODE_INVAL)
  elif node is None and iallocator is None:
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found."
                                 " Please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator.")


class LUPostInitCluster(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUDestroyCluster(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    if modify_ssh_setup:
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      utils.CreateBackup(priv_key)
      utils.CreateBackup(pub_key)

    return master


def _VerifyCertificate(filename):
  """Verifies a certificate for LUVerifyCluster.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable-msg=W0703
    return (LUVerifyCluster.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  (errcode, msg) = \
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
                                constants.SSL_CERT_EXPIRATION_ERROR)

  if msg:
    fnamemsg = "While verifying %s: %s" % (filename, msg)
  else:
    fnamemsg = None

  if errcode is None:
    return (None, fnamemsg)
  elif errcode == utils.CERT_WARNING:
    return (LUVerifyCluster.ETYPE_WARNING, fnamemsg)
  elif errcode == utils.CERT_ERROR:
    return (LUVerifyCluster.ETYPE_ERROR, fnamemsg)

  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_PARAMS = [
    ("skip_checks", _EmptyList,
     _TListOf(_TElemOf(constants.VERIFY_OPTIONAL_CHECKS))),
    ("verbose", False, _TBool),
    ("error_codes", False, _TBool),
    ("debug_simulate_errors", False, _TBool),
    ]
  REQ_BGL = False

  TCLUSTER = "cluster"
  TNODE = "node"
  TINSTANCE = "instance"

  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
  ENODEDRBD = (TNODE, "ENODEDRBD")
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
  ENODEHV = (TNODE, "ENODEHV")
  ENODELVM = (TNODE, "ENODELVM")
  ENODEN1 = (TNODE, "ENODEN1")
  ENODENET = (TNODE, "ENODENET")
  ENODEOS = (TNODE, "ENODEOS")
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
  ENODERPC = (TNODE, "ENODERPC")
  ENODESSH = (TNODE, "ENODESSH")
  ENODEVERSION = (TNODE, "ENODEVERSION")
  ENODESETUP = (TNODE, "ENODESETUP")
  ENODETIME = (TNODE, "ENODETIME")

  ETYPE_FIELD = "code"
  ETYPE_ERROR = "ERROR"
  ETYPE_WARNING = "WARNING"

  class NodeImage(object):
    """A class representing the logical and physical status of a node.

    @type name: string
    @ivar name: the node name to which this object refers
    @ivar volumes: a structure as returned from
        L{ganeti.backend.GetVolumeList} (runtime)
    @ivar instances: a list of running instances (runtime)
    @ivar pinst: list of configured primary instances (config)
    @ivar sinst: list of configured secondary instances (config)
    @ivar sbp: dictionary of {secondary-node: list of instances} of all peers
        of this node (config)
    @ivar mfree: free memory, as reported by hypervisor (runtime)
    @ivar dfree: free disk, as reported by the node (runtime)
    @ivar offline: the offline status (config)
    @type rpc_fail: boolean
    @ivar rpc_fail: whether the RPC verify call was successful (overall,
        not whether the individual keys were correct) (runtime)
    @type lvm_fail: boolean
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
    @type hyp_fail: boolean
    @ivar hyp_fail: whether the RPC call didn't return the instance list
    @type ghost: boolean
    @ivar ghost: whether this is a known node or not (config)
    @type os_fail: boolean
    @ivar os_fail: whether the RPC call didn't return valid OS data
    @type oslist: list
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS

    """
    def __init__(self, offline=False, name=None):
      self.name = name
      self.volumes = {}
      self.instances = []
      self.pinst = []
      self.sinst = []
      self.sbp = {}
      self.mfree = 0
      self.dfree = 0
      self.offline = offline
      self.rpc_fail = False
      self.lvm_fail = False
      self.hyp_fail = False
      self.ghost = False
      self.os_fail = False
      self.oslist = {}

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def _Error(self, ecode, item, msg, *args, **kwargs):
    """Format an error message.

    Based on the opcode's error_codes parameter, either format a
    parseable error code, or a simpler error string.

    This must be called only from Exec and functions called from Exec.

    """
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
    itype, etxt = ecode
    # first complete the msg
    if args:
      msg = msg % args
    # then format the whole message
    if self.op.error_codes:
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
    else:
      if item:
        item = " " + item
      else:
        item = ""
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
    # and finally report it via the feedback_fn
    self._feedback_fn("  - %s" % msg)

  def _ErrorIf(self, cond, *args, **kwargs):
    """Log an error message if the passed condition is True.

    """
    cond = bool(cond) or self.op.debug_simulate_errors
    if cond:
      self._Error(*args, **kwargs)
    # do not mark the operation as failed for WARN cases only
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
      self.bad = self.bad or cond

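  # Illustrative example (hypothetical values, not part of the original
  # module): for ecode=self.ENODELVM, item="node1.example.com" and
  # msg="unable to check volume groups", _Error emits either
  #   "ERROR:ENODELVM:node:node1.example.com:unable to check volume groups"
  # when the opcode's error_codes parameter is set, or
  #   "ERROR: node node1.example.com: unable to check volume groups"
  # otherwise.
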
  def _VerifyNode(self, ninfo, nresult):
1443
    """Perform some basic validation on data returned from a node.
1444

1445
      - check the result data structure is well formed and has all the
1446
        mandatory fields
1447
      - check ganeti version
1448

1449
    @type ninfo: L{objects.Node}
1450
    @param ninfo: the node to check
1451
    @param nresult: the results from the node
1452
    @rtype: boolean
1453
    @return: whether overall this call was successful (and we can expect
1454
         reasonable values in the respose)
1455

1456
    """
1457
    node = ninfo.name
1458
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1459

    
1460
    # main result, nresult should be a non-empty dict
1461
    test = not nresult or not isinstance(nresult, dict)
1462
    _ErrorIf(test, self.ENODERPC, node,
1463
                  "unable to verify node: no data returned")
1464
    if test:
1465
      return False
1466

    
1467
    # compares ganeti version
1468
    local_version = constants.PROTOCOL_VERSION
1469
    remote_version = nresult.get("version", None)
1470
    test = not (remote_version and
1471
                isinstance(remote_version, (list, tuple)) and
1472
                len(remote_version) == 2)
1473
    _ErrorIf(test, self.ENODERPC, node,
1474
             "connection to node returned invalid data")
1475
    if test:
1476
      return False
1477

    
1478
    test = local_version != remote_version[0]
1479
    _ErrorIf(test, self.ENODEVERSION, node,
1480
             "incompatible protocol versions: master %s,"
1481
             " node %s", local_version, remote_version[0])
1482
    if test:
1483
      return False
1484

    
1485
    # node seems compatible, we can actually try to look into its results
1486

    
1487
    # full package version
1488
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1489
                  self.ENODEVERSION, node,
1490
                  "software version mismatch: master %s, node %s",
1491
                  constants.RELEASE_VERSION, remote_version[1],
1492
                  code=self.ETYPE_WARNING)
1493

    
1494
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1495
    if isinstance(hyp_result, dict):
1496
      for hv_name, hv_result in hyp_result.iteritems():
1497
        test = hv_result is not None
1498
        _ErrorIf(test, self.ENODEHV, node,
1499
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1500

    
1501

    
1502
    test = nresult.get(constants.NV_NODESETUP,
1503
                           ["Missing NODESETUP results"])
1504
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1505
             "; ".join(test))
1506

    
1507
    return True
1508

    
1509
  def _VerifyNodeTime(self, ninfo, nresult,
1510
                      nvinfo_starttime, nvinfo_endtime):
1511
    """Check the node time.
1512

1513
    @type ninfo: L{objects.Node}
1514
    @param ninfo: the node to check
1515
    @param nresult: the remote results for the node
1516
    @param nvinfo_starttime: the start time of the RPC call
1517
    @param nvinfo_endtime: the end time of the RPC call
1518

1519
    """
1520
    node = ninfo.name
1521
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1522

    
1523
    ntime = nresult.get(constants.NV_TIME, None)
1524
    try:
1525
      ntime_merged = utils.MergeTime(ntime)
1526
    except (ValueError, TypeError):
1527
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1528
      return
1529

    
1530
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
    else:
      ntime_diff = None

    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
             "Node time diverges by at least %s from master node time",
             ntime_diff)

  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
    """Check the node LVM data.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param vg_name: the configured VG name

    """
    if vg_name is None:
      return

    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # checks vg existence and size > 20G
    vglist = nresult.get(constants.NV_VGLIST, None)
    test = not vglist
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
    if not test:
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                            constants.MIN_VG_SIZE)
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)

    # check pv names
    pvlist = nresult.get(constants.NV_PVLIST, None)
    test = pvlist is None
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
    if not test:
      # check that ':' is not present in PV names, since it's a
      # special character for lvcreate (denotes the range of PEs to
      # use on the PV)
      for _, pvname, owner_vg in pvlist:
        test = ":" in pvname
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
                 " '%s' of VG '%s'", pvname, owner_vg)

  def _VerifyNodeNetwork(self, ninfo, nresult):
    """Check the node network connectivity.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    test = constants.NV_NODELIST not in nresult
    _ErrorIf(test, self.ENODESSH, node,
             "node hasn't returned node ssh connectivity data")
    if not test:
      if nresult[constants.NV_NODELIST]:
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
          _ErrorIf(True, self.ENODESSH, node,
                   "ssh communication with node '%s': %s", a_node, a_msg)

    test = constants.NV_NODENETTEST not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node tcp connectivity data")
    if not test:
      if nresult[constants.NV_NODENETTEST]:
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
        for anode in nlist:
          _ErrorIf(True, self.ENODENET, node,
                   "tcp communication with node '%s': %s",
                   anode, nresult[constants.NV_NODENETTEST][anode])

    test = constants.NV_MASTERIP not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node master IP reachability data")
    if not test:
      if not nresult[constants.NV_MASTERIP]:
        if node == self.master_node:
          msg = "the master node cannot reach the master IP (not configured?)"
        else:
          msg = "cannot reach the master IP"
        _ErrorIf(True, self.ENODENET, node, msg)


  def _VerifyInstance(self, instance, instanceconfig, node_image):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      n_img = node_image[node]
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # ignore missing volumes on offline or broken nodes
        continue
      for volume in node_vol_should[node]:
        test = volume not in n_img.volumes
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
                 "volume %s missing on node %s", volume, node)

    if instanceconfig.admin_up:
      pri_img = node_image[node_current]
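      # an admin-up instance must be reported as running by its primary
      # node, unless that node is offline and cannot tell us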
      test = instance not in pri_img.instances and not pri_img.offline
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
               "instance not running on its primary node %s",
               node_current)

    for node, n_img in node_image.items():
      if node != node_current:
        test = instance in n_img.instances
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
                 "instance should not run on node %s", node)

  def _VerifyOrphanVolumes(self, node_vol_should, node_image):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    for node, n_img in node_image.items():
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # skip non-healthy nodes
        continue
      for volume in n_img.volumes:
        test = (node not in node_vol_should or
                volume not in node_vol_should[node])
        self._ErrorIf(test, self.ENODEORPHANLV, node,
                      "volume %s is unknown", volume)

  def _VerifyOrphanInstances(self, instancelist, node_image):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    for node, n_img in node_image.items():
      for o_inst in n_img.instances:
        test = o_inst not in instancelist
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
                      "instance %s on node %s should not exist", o_inst, node)

  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the
    instances it was primary for.

    """
    for node, n_img in node_image.items():
      # This code checks that every node which is now listed as
      # secondary has enough memory to host all instances it is
      # supposed to, should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well
      # as up ones, considering that even if they're down someone
      # might want to start them even in the event of a node failure.
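      # n_img.sbp maps each primary node to the instances that use this
      # node as a secondary, i.e. those that would fail over here if the
      # primary died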
      for prinode, instances in n_img.sbp.items():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        test = n_img.mfree < needed_mem
        self._ErrorIf(test, self.ENODEN1, node,
                      "not enough memory to accommodate"
                      " failovers should peer node %s fail", prinode)

  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
                       master_files):
    """Verifies and computes the node required file checksums.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param master_files: list of files that only masters should have

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_cksum = nresult.get(constants.NV_FILELIST, None)
    test = not isinstance(remote_cksum, dict)
    _ErrorIf(test, self.ENODEFILECHECK, node,
             "node hasn't returned file checksum data")
    if test:
      return

    for file_name in file_list:
      node_is_mc = ninfo.master_candidate
      must_have = (file_name not in master_files) or node_is_mc
      # missing
      test1 = file_name not in remote_cksum
      # invalid checksum
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
      # existing and good
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
               "file '%s' missing", file_name)
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
               "file '%s' has wrong checksum", file_name)
      # not candidate and this is not a must-have file
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist on non master"
               " candidates (and the file is outdated)", file_name)
      # all good, except non-master/non-must have combination
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
               "file '%s' should not exist"
               " on non master candidates", file_name)

  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
                      drbd_map):
    """Verifies the node DRBD status.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param instanceinfo: the dict of instances
    @param drbd_helper: the configured DRBD usermode helper
    @param drbd_map: the DRBD map as returned by
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    if drbd_helper:
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
      test = (helper_result is None)
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
               "no drbd usermode helper returned")
      if helper_result:
        status, payload = helper_result
        test = not status
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "drbd usermode helper check unsuccessful: %s", payload)
        test = status and (payload != drbd_helper)
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "wrong drbd usermode helper: %s", payload)

    # compute the DRBD minors
    node_drbd = {}
    for minor, instance in drbd_map[node].items():
      test = instance not in instanceinfo
      _ErrorIf(test, self.ECLUSTERCFG, None,
               "ghost instance '%s' in temporary DRBD map", instance)
      # ghost instance should not be running, but otherwise we
      # don't give double warnings (both ghost instance and
      # unallocated minor in use)
      if test:
        node_drbd[minor] = (instance, False)
      else:
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)

    # and now check them
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
    test = not isinstance(used_minors, (tuple, list))
    _ErrorIf(test, self.ENODEDRBD, node,
             "cannot parse drbd status file: %s", str(used_minors))
    if test:
      # we cannot check drbd status
      return

    for minor, (iname, must_exist) in node_drbd.items():
      test = minor not in used_minors and must_exist
      _ErrorIf(test, self.ENODEDRBD, node,
               "drbd minor %d of instance %s is not active", minor, iname)
    for minor in used_minors:
      test = minor not in node_drbd
      _ErrorIf(test, self.ENODEDRBD, node,
               "unallocated drbd minor %d is in use", minor)

  def _UpdateNodeOS(self, ninfo, nresult, nimg):
    """Builds the node OS structures.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    remote_os = nresult.get(constants.NV_OSLIST, None)
    test = (not isinstance(remote_os, list) or
            not compat.all(isinstance(v, list) and len(v) == 7
                           for v in remote_os))

    _ErrorIf(test, self.ENODEOS, node,
             "node hasn't returned valid OS data")

    nimg.os_fail = test

    if test:
      return

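    # map OS name to a list of (path, status, diagnose, variants,
    # parameters, api_versions) tuples, one entry per location in which
    # the OS was found on the node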
    os_dict = {}

    for (name, os_path, status, diagnose,
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:

      if name not in os_dict:
        os_dict[name] = []

      # parameters is a list of lists instead of list of tuples due to
      # JSON lacking a real tuple type, fix it:
      parameters = [tuple(v) for v in parameters]
      os_dict[name].append((os_path, status, diagnose,
                            set(variants), set(parameters), set(api_ver)))

    nimg.oslist = os_dict

  def _VerifyNodeOS(self, ninfo, nimg, base):
    """Verifies the node OS list.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nimg: the node image object
    @param base: the 'template' node we match against (e.g. from the master)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"

    for os_name, os_data in nimg.oslist.items():
      assert os_data, "Empty OS status for OS %s?!" % os_name
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
      _ErrorIf(not f_status, self.ENODEOS, node,
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
               "OS '%s' has multiple entries (first one shadows the rest): %s",
               os_name, utils.CommaJoin([v[0] for v in os_data]))
      # this will be caught in the backend too
      _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
               and not f_var, self.ENODEOS, node,
               "OS %s with API at least %d does not declare any variant",
               os_name, constants.OS_API_V15)
      # comparisons with the 'base' image
      test = os_name not in base.oslist
      _ErrorIf(test, self.ENODEOS, node,
               "Extra OS %s not present on reference node (%s)",
               os_name, base.name)
      if test:
        continue
      assert base.oslist[os_name], "Base node has empty OS status?"
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
      if not b_status:
        # base OS is invalid, skipping
        continue
      for kind, a, b in [("API version", f_api, b_api),
                         ("variants list", f_var, b_var),
                         ("parameters", f_param, b_param)]:
        _ErrorIf(a != b, self.ENODEOS, node,
                 "OS %s %s differs from reference node %s: %s vs. %s",
                 kind, os_name, base.name,
                 utils.CommaJoin(a), utils.CommaJoin(b))

    # check any missing OSes
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
    _ErrorIf(missing, self.ENODEOS, node,
             "OSes present on reference node %s but missing on this node: %s",
             base.name, utils.CommaJoin(missing))

  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
    """Verifies and updates the node volume data.

    This function will update a L{NodeImage}'s internal structures
    with data from the remote call.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    nimg.lvm_fail = True
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
    if vg_name is None:
      pass
    elif isinstance(lvdata, basestring):
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
               utils.SafeEncode(lvdata))
    elif not isinstance(lvdata, dict):
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
    else:
      nimg.volumes = lvdata
      nimg.lvm_fail = False

  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
    """Verifies and updates the node instance list.

    If the listing was successful, then updates this node's instance
    list. Otherwise, it marks the RPC call as failed for the instance
    list key.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    idata = nresult.get(constants.NV_INSTANCELIST, None)
    test = not isinstance(idata, list)
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
    if test:
      nimg.hyp_fail = True
    else:
      nimg.instances = idata

  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
    """Verifies and computes a node information map.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

    # try to read free memory (from the hypervisor)
    hv_info = nresult.get(constants.NV_HVINFO, None)
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
    if not test:
      try:
        nimg.mfree = int(hv_info["memory_free"])
      except (ValueError, TypeError):
        _ErrorIf(True, self.ENODERPC, node,
                 "node returned invalid nodeinfo, check hypervisor")

    # FIXME: devise a free space model for file based instances as well
    if vg_name is not None:
      test = (constants.NV_VGLIST not in nresult or
              vg_name not in nresult[constants.NV_VGLIST])
      _ErrorIf(test, self.ENODELVM, node,
               "node didn't return data for the volume group '%s'"
               " - it is either missing or broken", vg_name)
      if not test:
        try:
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
        except (ValueError, TypeError):
          _ErrorIf(True, self.ENODERPC, node,
                   "node returned invalid LVM info, check LVM status")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run only in the post phase; if they fail,
    their output is logged in the verify output and the verification fails.

    """
    all_nodes = self.cfg.GetNodeList()
    env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
      }
    for node in self.cfg.GetAllNodesInfo().values():
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())

    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    self.bad = False
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
    verbose = self.op.verbose
    self._feedback_fn = feedback_fn
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)

    # Check the cluster certificates
    for cert_filename in constants.ALL_CERT_FILES:
      (errcode, msg) = _VerifyCertificate(cert_filename)
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)

    vg_name = self.cfg.GetVGName()
    drbd_helper = self.cfg.GetDRBDHelper()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    cluster = self.cfg.GetClusterInfo()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = 0 # Count of offline nodes
    n_drained = 0 # Count of nodes being drained
    node_vol_should = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]
    master_node = self.master_node = self.cfg.GetMasterNode()
    master_ip = self.cfg.GetMasterIP()

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.extend(constants.ALL_CERT_FILES)
    file_names.extend(master_files)
    if cluster.modify_etc_hosts:
      file_names.append(constants.ETC_HOSTS)

    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
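    # build the per-node verification request; each NV_* key asks the
    # remote node to run one class of checks and report the results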
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_NODESETUP: None,
      constants.NV_TIME: None,
      constants.NV_MASTERIP: (master_node, master_ip),
      constants.NV_OSLIST: None,
      }

    if vg_name is not None:
      node_verify_param[constants.NV_VGLIST] = None
      node_verify_param[constants.NV_LVLIST] = vg_name
      node_verify_param[constants.NV_PVLIST] = [vg_name]
      node_verify_param[constants.NV_DRBDLIST] = None

    if drbd_helper:
      node_verify_param[constants.NV_DRBDHELPER] = drbd_helper

    # Build our expected cluster state
    node_image = dict((node.name, self.NodeImage(offline=node.offline,
                                                 name=node.name))
                      for node in nodeinfo)

    for instance in instancelist:
      inst_config = instanceinfo[instance]

      for nname in inst_config.all_nodes:
        if nname not in node_image:
          # ghost node
          gnode = self.NodeImage(name=nname)
          gnode.ghost = True
          node_image[nname] = gnode

      inst_config.MapLVsByNode(node_vol_should)

      pnode = inst_config.primary_node
      node_image[pnode].pinst.append(instance)

      for snode in inst_config.secondary_nodes:
        nimg = node_image[snode]
        nimg.sinst.append(instance)
        if pnode not in nimg.sbp:
          nimg.sbp[pnode] = []
        nimg.sbp[pnode].append(instance)

    # At this point, we have the in-memory data structures complete,
    # except for the runtime information, which we'll gather next

    # Due to the way our RPC system works, exact response times cannot be
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
    # time before and after executing the request, we can at least have a time
    # window.
    nvinfo_starttime = time.time()
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    nvinfo_endtime = time.time()

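    # configuration view of DRBD usage: node name -> {minor: instance name}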
    all_drbd_map = self.cfg.ComputeDRBDMap()

    feedback_fn("* Verifying node status")

    refos_img = None

    for node_i in nodeinfo:
      node = node_i.name
      nimg = node_image[node]

      if node_i.offline:
        if verbose:
          feedback_fn("* Skipping offline node %s" % (node,))
        n_offline += 1
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      elif node_i.drained:
        ntype = "drained"
        n_drained += 1
      else:
        ntype = "regular"
      if verbose:
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      msg = all_nvinfo[node].fail_msg
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
      if msg:
        nimg.rpc_fail = True
        continue

      nresult = all_nvinfo[node].payload

      nimg.call_ok = self._VerifyNode(node_i, nresult)
      self._VerifyNodeNetwork(node_i, nresult)
      self._VerifyNodeLVM(node_i, nresult, vg_name)
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
                            master_files)
      self._VerifyNodeDrbd(node_i, nresult, instanceinfo, drbd_helper,
                           all_drbd_map)
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)

      self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
      self._UpdateNodeInstances(node_i, nresult, nimg)
      self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
      self._UpdateNodeOS(node_i, nresult, nimg)
      if not nimg.os_fail:
        if refos_img is None:
          refos_img = nimg
        self._VerifyNodeOS(node_i, nimg, refos_img)

    feedback_fn("* Verifying instance status")
    for instance in instancelist:
      if verbose:
        feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      self._VerifyInstance(instance, inst_config, node_image)
      inst_nodes_offline = []

      pnode = inst_config.primary_node
      pnode_img = node_image[pnode]
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
               self.ENODERPC, pnode, "instance %s, connection to"
               " primary node failed", instance)

      if pnode_img.offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if not inst_config.secondary_nodes:
        i_non_redundant.append(instance)
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
               instance, "instance has multiple secondary nodes: %s",
               utils.CommaJoin(inst_config.secondary_nodes),
               code=self.ETYPE_WARNING)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        s_img = node_image[snode]
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
                 "instance %s, connection to secondary node failed", instance)

        if s_img.offline:
          inst_nodes_offline.append(snode)

      # warn that the instance lives on offline nodes
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
               "instance lives on offline node(s) %s",
               utils.CommaJoin(inst_nodes_offline))
      # ... or ghost nodes
      for node in inst_config.all_nodes:
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
                 "instance lives on ghost node %s", node)

    feedback_fn("* Verifying orphan volumes")
    self._VerifyOrphanVolumes(node_vol_should, node_image)

    feedback_fn("* Verifying orphan instances")
    self._VerifyOrphanInstances(instancelist, node_image)

    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
      feedback_fn("* Verifying N+1 Memory redundancy")
      self._VerifyNPlusOneMemory(node_image, instanceinfo)

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)

    if n_drained:
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)

    return not self.bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      assert hooks_results, "invalid result from hooks"

      for node_name in hooks_results:
        res = hooks_results[node_name]
        msg = res.fail_msg
        test = msg and not res.offline
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
                      "Communication failure in hooks execution: %s", msg)
        if res.offline or msg:
          # No need to investigate payload if node is offline or gave an error.
          # override lu_result manually here, as _ErrorIf only
          # overrides self.bad
          lu_result = 1
          continue
        for script, hkr, output in res.payload:
          test = hkr == constants.HKR_FAIL
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
                        "Script %s failed, output:", script)
          if test:
            output = indent_re.sub('      ', output)
            feedback_fn("%s" % output)
            lu_result = 0

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    result = res_nodes, res_instances, res_missing = {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_lv_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      node_res = node_lvs[node]
      if node_res.offline:
        continue
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
        res_nodes[node] = msg
        continue

      lvs = node_res.payload
      for lv_name, (_, _, lv_online) in lvs.items():
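        # pop() the LVs we did find; whatever remains in nv_dict at the
        # end of the loop is a missing volume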
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LURepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  _OP_PARAMS = [("instances", _EmptyList, _TListOf(_TNonEmptyString))]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = _ExpandInstanceName(self.cfg, name)
        self.wanted_names.append(full_name)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have a smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsizes(node, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getsizes call to node"
                        " %s, ignoring", node)
        continue
      if len(result.data) != len(dskl):
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.data):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
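        # the reported size is in bytes; shift by 20 bits to compare in
        # MiB, the unit in which disk.size is recorded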
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, disk.size))
    return changed


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_PARAMS = [("name", _NoDefault, _TNonEmptyString)]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    all_nodes = self.cfg.GetNodeList()
    return env, [mn], all_nodes

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = netutils.GetHostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed",
                                 errors.ECODE_INVAL)
    if new_ip != old_ip:
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip, errors.ECODE_NOTUNIQUE)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        msg = to_result.fail_msg
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
          self.proc.LogWarning(msg)

    finally:
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)


class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_PARAMS = [
    ("vg_name", None, _TMaybeString),
    ("enabled_hypervisors", None,
     _TOr(_TAnd(_TListOf(_TElemOf(constants.HYPER_TYPES)), _TTrue), _TNone)),
    ("hvparams", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
    ("beparams", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
    ("os_hvp", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
    ("osparams", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
    ("candidate_pool_size", None, _TOr(_TStrictPositiveInt, _TNone)),
    ("uid_pool", None, _NoType),
    ("add_uids", None, _NoType),
    ("remove_uids", None, _NoType),
    ("maintain_node_health", None, _TMaybeBool),
    ("nicparams", None, _TOr(_TDict, _TNone)),
    ("drbd_helper", None, _TOr(_TString, _TNone)),
    ("default_iallocator", None, _TMaybeString),
    ]
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters.

    """
    if self.op.uid_pool:
      uidpool.CheckUidPool(self.op.uid_pool)

    if self.op.add_uids:
      uidpool.CheckUidPool(self.op.add_uids)

    if self.op.remove_uids:
      uidpool.CheckUidPool(self.op.remove_uids)

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                   " instances exist", errors.ECODE_INVAL)

    if self.op.drbd_helper is not None and not self.op.drbd_helper:
      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
        raise errors.OpPrereqError("Cannot disable drbd helper while"
                                   " drbd-based instances exist",
                                   errors.ECODE_INVAL)

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].fail_msg
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus), errors.ECODE_ENVIRON)

    if self.op.drbd_helper:
      # checks given drbd helper on all nodes
      helpers = self.rpc.call_drbd_helper(node_list)
      for node in node_list:
        ninfo = self.cfg.GetNodeInfo(node)
        if ninfo.offline:
          self.LogInfo("Not checking drbd helper on offline node %s", node)
          continue
        msg = helpers[node].fail_msg
        if msg:
          raise errors.OpPrereqError("Error checking drbd helper on node"
                                     " '%s': %s" % (node, msg),
                                     errors.ECODE_ENVIRON)
        node_helper = helpers[node].payload
        if node_helper != self.op.drbd_helper:
          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
                                     (node, node_helper), errors.ECODE_ENVIRON)

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate params changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
      nic_errors = []

      # check all instances for consistency
      for instance in self.cfg.GetAllInstancesInfo().values():
        for nic_idx, nic in enumerate(instance.nics):
          params_copy = copy.deepcopy(nic.nicparams)
          params_filled = objects.FillDict(self.new_nicparams, params_copy)

          # check parameter syntax
          try:
            objects.NIC.CheckParameterSyntax(params_filled)
          except errors.ConfigurationError, err:
            nic_errors.append("Instance %s, nic/%d: %s" %
                              (instance.name, nic_idx, err))

          # if we're moving instances to routed, check that they have an ip
          target_mode = params_filled[constants.NIC_MODE]
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
            nic_errors.append("Instance %s, nic/%d: routed NIC with no IP" %
                              (instance.name, nic_idx))
      if nic_errors:
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
                                   "\n".join(nic_errors))

    # hypervisor list/parameters
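    # FillDict(current, {}) just copies the current cluster values into a
    # dict we can update without touching the live configuration object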
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    # os hypervisor parameters
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
    if self.op.os_hvp:
      for os_name, hvs in self.op.os_hvp.items():
        if os_name not in self.new_os_hvp:
          self.new_os_hvp[os_name] = hvs
        else:
          for hv_name, hv_dict in hvs.items():
            if hv_name not in self.new_os_hvp[os_name]:
              self.new_os_hvp[os_name][hv_name] = hv_dict
            else:
              self.new_os_hvp[os_name][hv_name].update(hv_dict)

    # os parameters
    self.new_osp = objects.FillDict(cluster.osparams, {})
    if self.op.osparams:
      for os_name, osp in self.op.osparams.items():
        if os_name not in self.new_osp:
          self.new_osp[os_name] = {}

        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
                                                  use_none=True)

        if not self.new_osp[os_name]:
          # we removed all parameters
          del self.new_osp[os_name]
        else:
          # check the parameter validity (remote check)
          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
                         os_name, self.new_osp[os_name])

    # changes to the hypervisor list
    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      for hv in self.hv_list:
        # if the hypervisor doesn't already exist in the cluster
        # hvparams, we initialize it to empty, and then (in both
        # cases) we make sure to fill the defaults, as we might not
        # have a complete defaults list if the hypervisor wasn't
        # enabled before
        if hv not in new_hvp:
          new_hvp[hv] = {}
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

    if self.op.os_hvp:
      # no need to check any newly-enabled hypervisors, since the
      # defaults have already been checked in the above code-block
      for os_name, os_hvp in self.new_os_hvp.items():
        for hv_name, hv_params in os_hvp.items():
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          # we need to fill in the new os_hvp on top of the actual hv_p
          cluster_defaults = self.new_hvparams.get(hv_name, {})
          new_osp = objects.FillDict(cluster_defaults, hv_params)
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(new_osp)
          _CheckHVParams(self, node_list, hv_name, new_osp)

    if self.op.default_iallocator:
      alloc_script = utils.FindFile(self.op.default_iallocator,
                                    constants.IALLOCATOR_SEARCH_PATH,
                                    os.path.isfile)
      if alloc_script is None:
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                   " specified" % self.op.default_iallocator,
                                   errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.drbd_helper is not None:
      new_helper = self.op.drbd_helper
      if not new_helper:
        new_helper = None
      if new_helper != self.cfg.GetDRBDHelper():
        self.cfg.SetDRBDHelper(new_helper)
      else:
        feedback_fn("Cluster DRBD helper already in desired state,"
                    " not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.os_hvp:
      self.cluster.os_hvp = self.new_os_hvp
    if self.op.enabled_hypervisors is not None:
      self.cluster.hvparams = self.new_hvparams
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
    if self.op.osparams:
      self.cluster.osparams = self.new_osp

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self, [])

    if self.op.maintain_node_health is not None:
      self.cluster.maintain_node_health = self.op.maintain_node_health

    if self.op.add_uids is not None:
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)

    if self.op.remove_uids is not None:
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)

    if self.op.uid_pool is not None:
      self.cluster.uid_pool = self.op.uid_pool

    if self.op.default_iallocator is not None:
      self.cluster.default_iallocator = self.op.default_iallocator

    self.cfg.Update(self.cluster, feedback_fn)


def _RedistributeAncillaryFiles(lu, additional_nodes=None):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to

  """
  # 1. Gather target nodes
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  dist_nodes = lu.cfg.GetOnlineNodeList()
  if additional_nodes is not None:
    dist_nodes.extend(additional_nodes)
  if myself.name in dist_nodes:
    dist_nodes.remove(myself.name)

  # 2. Gather files to distribute
  dist_files = set([constants.ETC_HOSTS,
                    constants.SSH_KNOWN_HOSTS_FILE,
                    constants.RAPI_CERT_FILE,
                    constants.RAPI_USERS_FILE,
                    constants.CONFD_HMAC_KEY,
                    constants.CLUSTER_DOMAIN_SECRET_FILE,
                   ])

  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
  for hv_name in enabled_hypervisors:
    hv_class = hypervisor.GetHypervisor(hv_name)
    dist_files.update(hv_class.GetAncillaryFiles())

  # 3. Perform the files upload
  for fname in dist_files:
    if os.path.exists(fname):
2913
      result = lu.rpc.call_upload_file(dist_nodes, fname)
2914
      for to_node, to_result in result.items():
2915
        msg = to_result.fail_msg
2916
        if msg:
2917
          msg = ("Copy of file %s to node %s failed: %s" %
2918
                 (fname, to_node, msg))
2919
          lu.proc.LogWarning(msg)
2920
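
# A tiny illustration (not called by the code around it) of how the
# distribution target list built above in _RedistributeAncillaryFiles is
# computed: all online nodes, plus any explicitly requested extra nodes,
# minus the master itself, which already holds the files.
def _ExampleDistributionTargets(online_nodes, master_name,
                                additional_nodes=None):
  """Return the node names ancillary files would be copied to (sketch only).

  """
  targets = list(online_nodes)
  if additional_nodes is not None:
    targets.extend(additional_nodes)
  return [name for name in targets if name != master_name]
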

    
2921

    
2922
class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    _RedistributeAncillaryFiles(self)


def _WaitForSync(lu, instance, disks=None, oneshot=False):
2945
  """Sleep and poll for an instance's disk to sync.
2946

2947
  """
2948
  if not instance.disks or disks is not None and not disks:
2949
    return True
2950

    
2951
  disks = _ExpandCheckDisks(instance, disks)
2952

    
2953
  if not oneshot:
2954
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
2955

    
2956
  node = instance.primary_node
2957

    
2958
  for dev in disks:
2959
    lu.cfg.SetDiskID(dev, node)
2960

    
2961
  # TODO: Convert to utils.Retry
2962

    
2963
  retries = 0
2964
  degr_retries = 10 # in seconds, as we sleep 1 second each time
2965
  while True:
2966
    max_time = 0
2967
    done = True
2968
    cumul_degraded = False
2969
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
2970
    msg = rstats.fail_msg
2971
    if msg:
2972
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
2973
      retries += 1
2974
      if retries >= 10:
2975
        raise errors.RemoteError("Can't contact node %s for mirror data,"
2976
                                 " aborting." % node)
2977
      time.sleep(6)
2978
      continue
2979
    rstats = rstats.payload
2980
    retries = 0
2981
    for i, mstat in enumerate(rstats):
2982
      if mstat is None:
2983
        lu.LogWarning("Can't compute data for node %s/%s",
2984
                           node, disks[i].iv_name)
2985
        continue
2986

    
2987
      cumul_degraded = (cumul_degraded or
2988
                        (mstat.is_degraded and mstat.sync_percent is None))
2989
      if mstat.sync_percent is not None:
2990
        done = False
2991
        if mstat.estimated_time is not None:
2992
          rem_time = ("%s remaining (estimated)" %
2993
                      utils.FormatSeconds(mstat.estimated_time))
2994
          max_time = mstat.estimated_time
2995
        else:
2996
          rem_time = "no time estimate"
2997
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
2998
                        (disks[i].iv_name, mstat.sync_percent, rem_time))
2999

    
3000
    # if we're done but degraded, let's do a few small retries, to
3001
    # make sure we see a stable and not transient situation; therefore
3002
    # we force restart of the loop
3003
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
3004
      logging.info("Degraded disks found, %d retries left", degr_retries)
3005
      degr_retries -= 1
3006
      time.sleep(1)
3007
      continue
3008

    
3009
    if done or oneshot:
3010
      break
3011

    
3012
    time.sleep(min(60, max_time))
3013

    
3014
  if done:
3015
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
3016
  return not cumul_degraded
3017
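
# Illustration only: the polling loop above sleeps between iterations for at
# most 60 seconds, bounded by the device's own remaining-time estimate, and
# polls again immediately when no estimate is available (max_time stays 0).
def _ExampleSyncSleepTime(estimated_time):
  """Return the sleep time the loop above would use for one pass (sketch).

  """
  if estimated_time is None:
    # no estimate reported by the device, poll again right away
    return 0
  return min(60, estimated_time)
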

    
3018

    
3019
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
3020
  """Check that mirrors are not degraded.
3021

3022
  The ldisk parameter, if True, will change the test from the
3023
  is_degraded attribute (which represents overall non-ok status for
3024
  the device(s)) to the ldisk (representing the local storage status).
3025

3026
  """
3027
  lu.cfg.SetDiskID(dev, node)
3028

    
3029
  result = True
3030

    
3031
  if on_primary or dev.AssembleOnSecondary():
3032
    rstats = lu.rpc.call_blockdev_find(node, dev)
3033
    msg = rstats.fail_msg
3034
    if msg:
3035
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
3036
      result = False
3037
    elif not rstats.payload:
3038
      lu.LogWarning("Can't find disk on node %s", node)
3039
      result = False
3040
    else:
3041
      if ldisk:
3042
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
3043
      else:
3044
        result = result and not rstats.payload.is_degraded
3045

    
3046
  if dev.children:
3047
    for child in dev.children:
3048
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
3049

    
3050
  return result
3051
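
# A minimal sketch (not used by _CheckDiskConsistency above) of its recursive
# rule: a device counts as consistent only if its own status is good and
# every one of its child devices is consistent as well.
def _ExampleTreeConsistent(status_by_dev, children_by_dev, dev):
  """Recursively AND a device's status with its children's statuses (sketch).

  """
  result = status_by_dev.get(dev, False)
  for child in children_by_dev.get(dev, ()):
    result = result and _ExampleTreeConsistent(status_by_dev,
                                               children_by_dev, child)
  return result
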

    
3052

    
3053
class LUDiagnoseOS(NoHooksLU):
3054
  """Logical unit for OS diagnose/query.
3055

3056
  """
3057
  _OP_PARAMS = [
3058
    _POutputFields,
3059
    ("names", _EmptyList, _TListOf(_TNonEmptyString)),
3060
    ]
3061
  REQ_BGL = False
3062
  _FIELDS_STATIC = utils.FieldSet()
3063
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants",
3064
                                   "parameters", "api_versions")
3065

    
3066
  def CheckArguments(self):
3067
    if self.op.names:
3068
      raise errors.OpPrereqError("Selective OS query not supported",
3069
                                 errors.ECODE_INVAL)
3070

    
3071
    _CheckOutputFields(static=self._FIELDS_STATIC,
3072
                       dynamic=self._FIELDS_DYNAMIC,
3073
                       selected=self.op.output_fields)
3074

    
3075
  def ExpandNames(self):
3076
    # Lock all nodes, in shared mode
3077
    # Temporary removal of locks, should be reverted later
3078
    # TODO: reintroduce locks when they are lighter-weight
3079
    self.needed_locks = {}
3080
    #self.share_locks[locking.LEVEL_NODE] = 1
3081
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3082

    
3083
  @staticmethod
3084
  def _DiagnoseByOS(rlist):
3085
    """Remaps a per-node return list into an a per-os per-node dictionary
3086

3087
    @param rlist: a map with node names as keys and OS objects as values
3088

3089
    @rtype: dict
3090
    @return: a dictionary with osnames as keys and as value another
3091
        map, with nodes as keys and tuples of (path, status, diagnose,
3092
        variants, parameters, api_versions) as values, eg::
3093

3094
          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
3095
                                     (/srv/..., False, "invalid api")],
3096
                           "node2": [(/srv/..., True, "", [], [])]}
3097
          }
3098

3099
    """
3100
    all_os = {}
3101
    # we build here the list of nodes that didn't fail the RPC (at RPC
3102
    # level), so that nodes with a non-responding node daemon don't
3103
    # make all OSes invalid
3104
    good_nodes = [node_name for node_name in rlist
3105
                  if not rlist[node_name].fail_msg]
3106
    for node_name, nr in rlist.items():
3107
      if nr.fail_msg or not nr.payload:
3108
        continue
3109
      for (name, path, status, diagnose, variants,
3110
           params, api_versions) in nr.payload:
3111
        if name not in all_os:
3112
          # build a list of nodes for this os containing empty lists
3113
          # for each node in node_list
3114
          all_os[name] = {}
3115
          for nname in good_nodes:
3116
            all_os[name][nname] = []
3117
        # convert params from [name, help] to (name, help)
3118
        params = [tuple(v) for v in params]
3119
        all_os[name][node_name].append((path, status, diagnose,
3120
                                        variants, params, api_versions))
3121
    return all_os
3122

    
3123
  def Exec(self, feedback_fn):
3124
    """Compute the list of OSes.
3125

3126
    """
3127
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
3128
    node_data = self.rpc.call_os_diagnose(valid_nodes)
3129
    pol = self._DiagnoseByOS(node_data)
3130
    output = []
3131

    
3132
    for os_name, os_data in pol.items():
3133
      row = []
3134
      valid = True
3135
      (variants, params, api_versions) = null_state = (set(), set(), set())
3136
      for idx, osl in enumerate(os_data.values()):
3137
        valid = bool(valid and osl and osl[0][1])
3138
        if not valid:
3139
          (variants, params, api_versions) = null_state
3140
          break
3141
        node_variants, node_params, node_api = osl[0][3:6]
3142
        if idx == 0: # first entry
3143
          variants = set(node_variants)
3144
          params = set(node_params)
3145
          api_versions = set(node_api)
3146
        else: # keep consistency
3147
          variants.intersection_update(node_variants)
3148
          params.intersection_update(node_params)
3149
          api_versions.intersection_update(node_api)
3150

    
3151
      for field in self.op.output_fields:
3152
        if field == "name":
3153
          val = os_name
3154
        elif field == "valid":
3155
          val = valid
3156
        elif field == "node_status":
3157
          # this is just a copy of the dict
3158
          val = {}
3159
          for node_name, nos_list in os_data.items():
3160
            val[node_name] = nos_list
3161
        elif field == "variants":
3162
          val = list(variants)
3163
        elif field == "parameters":
3164
          val = list(params)
3165
        elif field == "api_versions":
3166
          val = list(api_versions)
3167
        else:
3168
          raise errors.ParameterError(field)
3169
        row.append(val)
3170
      output.append(row)
3171

    
3172
    return output
3173
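
# Self-contained sketch, on plain dictionaries instead of RPC result objects,
# of the remapping LUDiagnoseOS._DiagnoseByOS performs above: the per-node OS
# listings are turned inside out into a per-OS mapping of node name to the
# entries reported there.
def _ExampleRemapByOS(node_to_oses):
  """Remap {node: [(os_name, data), ...]} to {os_name: {node: [data, ...]}}.

  Illustration only.

  """
  per_os = {}
  for node_name, entries in node_to_oses.items():
    for (os_name, data) in entries:
      per_os.setdefault(os_name, {}).setdefault(node_name, []).append(data)
  return per_os
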

    
3174

    
3175
class LURemoveNode(LogicalUnit):
3176
  """Logical unit for removing a node.
3177

3178
  """
3179
  HPATH = "node-remove"
3180
  HTYPE = constants.HTYPE_NODE
3181
  _OP_PARAMS = [
3182
    _PNodeName,
3183
    ]
3184

    
3185
  def BuildHooksEnv(self):
3186
    """Build hooks env.
3187

3188
    This doesn't run on the target node in the pre phase as a failed
3189
    node would then be impossible to remove.
3190

3191
    """
3192
    env = {
3193
      "OP_TARGET": self.op.node_name,
3194
      "NODE_NAME": self.op.node_name,
3195
      }
3196
    all_nodes = self.cfg.GetNodeList()
3197
    try:
3198
      all_nodes.remove(self.op.node_name)
3199
    except ValueError:
3200
      logging.warning("Node %s which is about to be removed not found"
3201
                      " in the all nodes list", self.op.node_name)
3202
    return env, all_nodes, all_nodes
3203

    
3204
  def CheckPrereq(self):
3205
    """Check prerequisites.
3206

3207
    This checks:
3208
     - the node exists in the configuration
3209
     - it does not have primary or secondary instances
3210
     - it's not the master
3211

3212
    Any errors are signaled by raising errors.OpPrereqError.
3213

3214
    """
3215
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3216
    node = self.cfg.GetNodeInfo(self.op.node_name)
3217
    assert node is not None
3218

    
3219
    instance_list = self.cfg.GetInstanceList()
3220

    
3221
    masternode = self.cfg.GetMasterNode()
3222
    if node.name == masternode:
3223
      raise errors.OpPrereqError("Node is the master node,"
3224
                                 " you need to failover first.",
3225
                                 errors.ECODE_INVAL)
3226

    
3227
    for instance_name in instance_list:
3228
      instance = self.cfg.GetInstanceInfo(instance_name)
3229
      if node.name in instance.all_nodes:
3230
        raise errors.OpPrereqError("Instance %s is still running on the node,"
3231
                                   " please remove first." % instance_name,
3232
                                   errors.ECODE_INVAL)
3233
    self.op.node_name = node.name
3234
    self.node = node
3235

    
3236
  def Exec(self, feedback_fn):
3237
    """Removes the node from the cluster.
3238

3239
    """
3240
    node = self.node
3241
    logging.info("Stopping the node daemon and removing configs from node %s",
3242
                 node.name)
3243

    
3244
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
3245

    
3246
    # Promote nodes to master candidate as needed
3247
    _AdjustCandidatePool(self, exceptions=[node.name])
3248
    self.context.RemoveNode(node.name)
3249

    
3250
    # Run post hooks on the node before it's removed
3251
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
3252
    try:
3253
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
3254
    except:
3255
      # pylint: disable-msg=W0702
3256
      self.LogWarning("Errors occurred running hooks on %s" % node.name)
3257

    
3258
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
3259
    msg = result.fail_msg
3260
    if msg:
3261
      self.LogWarning("Errors encountered on the remote node while leaving"
3262
                      " the cluster: %s", msg)
3263

    
3264
    # Remove node from our /etc/hosts
3265
    if self.cfg.GetClusterInfo().modify_etc_hosts:
3266
      # FIXME: this should be done via an rpc call to node daemon
3267
      utils.RemoveHostFromEtcHosts(node.name)
3268
      _RedistributeAncillaryFiles(self)
3269
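
# Compact illustration (not used by LURemoveNode above) of the prerequisites
# its CheckPrereq enforces: the node must not be the master, and no instance
# may still list it among its nodes.
def _ExampleNodeRemovable(node_name, master_name, instance_nodes):
  """Return (ok, reason) for removing node_name (sketch only).

  instance_nodes maps instance names to the node names each instance uses.

  """
  if node_name == master_name:
    return (False, "node is the master, failover first")
  for inst_name, nodes in instance_nodes.items():
    if node_name in nodes:
      return (False, "instance %s still uses the node" % inst_name)
  return (True, None)
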

    
3270

    
3271
class LUQueryNodes(NoHooksLU):
3272
  """Logical unit for querying nodes.
3273

3274
  """
3275
  # pylint: disable-msg=W0142
3276
  _OP_PARAMS = [
3277
    _POutputFields,
3278
    ("names", _EmptyList, _TListOf(_TNonEmptyString)),
3279
    ("use_locking", False, _TBool),
3280
    ]
3281
  REQ_BGL = False
3282

    
3283
  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
3284
                    "master_candidate", "offline", "drained"]
3285

    
3286
  _FIELDS_DYNAMIC = utils.FieldSet(
3287
    "dtotal", "dfree",
3288
    "mtotal", "mnode", "mfree",
3289
    "bootid",
3290
    "ctotal", "cnodes", "csockets",
3291
    )
3292

    
3293
  _FIELDS_STATIC = utils.FieldSet(*[
3294
    "pinst_cnt", "sinst_cnt",
3295
    "pinst_list", "sinst_list",
3296
    "pip", "sip", "tags",
3297
    "master",
3298
    "role"] + _SIMPLE_FIELDS
3299
    )
3300

    
3301
  def CheckArguments(self):
3302
    _CheckOutputFields(static=self._FIELDS_STATIC,
3303
                       dynamic=self._FIELDS_DYNAMIC,
3304
                       selected=self.op.output_fields)
3305

    
3306
  def ExpandNames(self):
3307
    self.needed_locks = {}
3308
    self.share_locks[locking.LEVEL_NODE] = 1
3309

    
3310
    if self.op.names:
3311
      self.wanted = _GetWantedNodes(self, self.op.names)
3312
    else:
3313
      self.wanted = locking.ALL_SET
3314

    
3315
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
3316
    self.do_locking = self.do_node_query and self.op.use_locking
3317
    if self.do_locking:
3318
      # if we don't request only static fields, we need to lock the nodes
3319
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
3320

    
3321
  def Exec(self, feedback_fn):
3322
    """Computes the list of nodes and their attributes.
3323

3324
    """
3325
    all_info = self.cfg.GetAllNodesInfo()
3326
    if self.do_locking:
3327
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
3328
    elif self.wanted != locking.ALL_SET:
3329
      nodenames = self.wanted
3330
      missing = set(nodenames).difference(all_info.keys())
3331
      if missing:
3332
        raise errors.OpExecError(
3333
          "Some nodes were removed before retrieving their data: %s" % missing)
3334
    else:
3335
      nodenames = all_info.keys()
3336

    
3337
    nodenames = utils.NiceSort(nodenames)
3338
    nodelist = [all_info[name] for name in nodenames]
3339

    
3340
    # begin data gathering
3341

    
3342
    if self.do_node_query:
3343
      live_data = {}
3344
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
3345
                                          self.cfg.GetHypervisorType())
3346
      for name in nodenames:
3347
        nodeinfo = node_data[name]
3348
        if not nodeinfo.fail_msg and nodeinfo.payload:
3349
          nodeinfo = nodeinfo.payload
3350
          fn = utils.TryConvert
3351
          live_data[name] = {
3352
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
3353
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
3354
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
3355
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
3356
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
3357
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
3358
            "bootid": nodeinfo.get('bootid', None),
3359
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
3360
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
3361
            }
3362
        else:
3363
          live_data[name] = {}
3364
    else:
3365
      live_data = dict.fromkeys(nodenames, {})
3366

    
3367
    node_to_primary = dict([(name, set()) for name in nodenames])
3368
    node_to_secondary = dict([(name, set()) for name in nodenames])
3369

    
3370
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
3371
                             "sinst_cnt", "sinst_list"))
3372
    if inst_fields & frozenset(self.op.output_fields):
3373
      inst_data = self.cfg.GetAllInstancesInfo()
3374

    
3375
      for inst in inst_data.values():
3376
        if inst.primary_node in node_to_primary:
3377
          node_to_primary[inst.primary_node].add(inst.name)
3378
        for secnode in inst.secondary_nodes:
3379
          if secnode in node_to_secondary:
3380
            node_to_secondary[secnode].add(inst.name)
3381

    
3382
    master_node = self.cfg.GetMasterNode()
3383

    
3384
    # end data gathering
3385

    
3386
    output = []
3387
    for node in nodelist:
3388
      node_output = []
3389
      for field in self.op.output_fields:
3390
        if field in self._SIMPLE_FIELDS:
3391
          val = getattr(node, field)
3392
        elif field == "pinst_list":
3393
          val = list(node_to_primary[node.name])
3394
        elif field == "sinst_list":
3395
          val = list(node_to_secondary[node.name])
3396
        elif field == "pinst_cnt":
3397
          val = len(node_to_primary[node.name])
3398
        elif field == "sinst_cnt":
3399
          val = len(node_to_secondary[node.name])
3400
        elif field == "pip":
3401
          val = node.primary_ip
3402
        elif field == "sip":
3403
          val = node.secondary_ip
3404
        elif field == "tags":
3405
          val = list(node.GetTags())
3406
        elif field == "master":
3407
          val = node.name == master_node
3408
        elif self._FIELDS_DYNAMIC.Matches(field):
3409
          val = live_data[node.name].get(field, None)
3410
        elif field == "role":
3411
          if node.name == master_node:
3412
            val = "M"
3413
          elif node.master_candidate:
3414
            val = "C"
3415
          elif node.drained:
3416
            val = "D"
3417
          elif node.offline:
3418
            val = "O"
3419
          else:
3420
            val = "R"
3421
        else:
3422
          raise errors.ParameterError(field)
3423
        node_output.append(val)
3424
      output.append(node_output)
3425

    
3426
    return output
3427
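
# Sketch only: the query LUs above build their output by dispatching on each
# requested field name in turn; with a mapping from field name to getter
# function the same row construction looks like this (an unknown field would
# raise KeyError here, where the LUs raise errors.ParameterError instead).
def _ExampleBuildRows(items, output_fields, getters):
  """Build one output row per item from getters[field](item) (sketch).

  """
  return [[getters[field](item) for field in output_fields]
          for item in items]
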

    
3428

    
3429
class LUQueryNodeVolumes(NoHooksLU):
3430
  """Logical unit for getting volumes on node(s).
3431

3432
  """
3433
  _OP_PARAMS = [
3434
    ("nodes", _EmptyList, _TListOf(_TNonEmptyString)),
3435
    ("output_fields", _NoDefault, _TListOf(_TNonEmptyString)),
3436
    ]
3437
  REQ_BGL = False
3438
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
3439
  _FIELDS_STATIC = utils.FieldSet("node")
3440

    
3441
  def CheckArguments(self):
3442
    _CheckOutputFields(static=self._FIELDS_STATIC,
3443
                       dynamic=self._FIELDS_DYNAMIC,
3444
                       selected=self.op.output_fields)
3445

    
3446
  def ExpandNames(self):
3447
    self.needed_locks = {}
3448
    self.share_locks[locking.LEVEL_NODE] = 1
3449
    if not self.op.nodes:
3450
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3451
    else:
3452
      self.needed_locks[locking.LEVEL_NODE] = \
3453
        _GetWantedNodes(self, self.op.nodes)
3454

    
3455
  def Exec(self, feedback_fn):
3456
    """Computes the list of nodes and their attributes.
3457

3458
    """
3459
    nodenames = self.acquired_locks[locking.LEVEL_NODE]
3460
    volumes = self.rpc.call_node_volumes(nodenames)
3461

    
3462
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
3463
             in self.cfg.GetInstanceList()]
3464

    
3465
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
3466

    
3467
    output = []
3468
    for node in nodenames:
3469
      nresult = volumes[node]
3470
      if nresult.offline:
3471
        continue
3472
      msg = nresult.fail_msg
3473
      if msg:
3474
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
3475
        continue
3476

    
3477
      node_vols = nresult.payload[:]
3478
      node_vols.sort(key=lambda vol: vol['dev'])
3479

    
3480
      for vol in node_vols:
3481
        node_output = []
3482
        for field in self.op.output_fields:
3483
          if field == "node":
3484
            val = node
3485
          elif field == "phys":
3486
            val = vol['dev']
3487
          elif field == "vg":
3488
            val = vol['vg']
3489
          elif field == "name":
3490
            val = vol['name']
3491
          elif field == "size":
3492
            val = int(float(vol['size']))
3493
          elif field == "instance":
3494
            for inst in ilist:
3495
              if node not in lv_by_node[inst]:
3496
                continue
3497
              if vol['name'] in lv_by_node[inst][node]:
3498
                val = inst.name
3499
                break
3500
            else:
3501
              val = '-'
3502
          else:
3503
            raise errors.ParameterError(field)
3504
          node_output.append(str(val))
3505

    
3506
        output.append(node_output)
3507

    
3508
    return output
3509

    
3510

    
3511
class LUQueryNodeStorage(NoHooksLU):
3512
  """Logical unit for getting information on storage units on node(s).
3513

3514
  """
3515
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3516
  _OP_PARAMS = [
3517
    ("nodes", _EmptyList, _TListOf(_TNonEmptyString)),
3518
    ("storage_type", _NoDefault, _CheckStorageType),
3519
    ("output_fields", _NoDefault, _TListOf(_TNonEmptyString)),
3520
    ("name", None, _TMaybeString),
3521
    ]
3522
  REQ_BGL = False
3523

    
3524
  def CheckArguments(self):
3525
    _CheckOutputFields(static=self._FIELDS_STATIC,
3526
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3527
                       selected=self.op.output_fields)
3528

    
3529
  def ExpandNames(self):
3530
    self.needed_locks = {}
3531
    self.share_locks[locking.LEVEL_NODE] = 1
3532

    
3533
    if self.op.nodes:
3534
      self.needed_locks[locking.LEVEL_NODE] = \
3535
        _GetWantedNodes(self, self.op.nodes)
3536
    else:
3537
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3538

    
3539
  def Exec(self, feedback_fn):
3540
    """Computes the list of nodes and their attributes.
3541

3542
    """
3543
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3544

    
3545
    # Always get name to sort by
3546
    if constants.SF_NAME in self.op.output_fields:
3547
      fields = self.op.output_fields[:]
3548
    else:
3549
      fields = [constants.SF_NAME] + self.op.output_fields
3550

    
3551
    # Never ask for node or type as it's only known to the LU
3552
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
3553
      while extra in fields:
3554
        fields.remove(extra)
3555

    
3556
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3557
    name_idx = field_idx[constants.SF_NAME]
3558

    
3559
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3560
    data = self.rpc.call_storage_list(self.nodes,
3561
                                      self.op.storage_type, st_args,
3562
                                      self.op.name, fields)
3563

    
3564
    result = []
3565

    
3566
    for node in utils.NiceSort(self.nodes):
3567
      nresult = data[node]
3568
      if nresult.offline:
3569
        continue
3570

    
3571
      msg = nresult.fail_msg
3572
      if msg:
3573
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
3574
        continue
3575

    
3576
      rows = dict([(row[name_idx], row) for row in nresult.payload])
3577

    
3578
      for name in utils.NiceSort(rows.keys()):
3579
        row = rows[name]
3580

    
3581
        out = []
3582

    
3583
        for field in self.op.output_fields:
3584
          if field == constants.SF_NODE:
3585
            val = node
3586
          elif field == constants.SF_TYPE:
3587
            val = self.op.storage_type
3588
          elif field in field_idx:
3589
            val = row[field_idx[field]]
3590
          else:
3591
            raise errors.ParameterError(field)
3592

    
3593
          out.append(val)
3594

    
3595
        result.append(out)
3596

    
3597
    return result
3598
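
# A small illustration (not called by LUQueryNodeStorage above) of its field
# preparation: the name field is always queried so the rows can be sorted by
# it, while fields only the LU itself can answer are stripped before the RPC
# is made to the nodes.
def _ExampleStorageQueryFields(output_fields, name_field, local_fields):
  """Return the field list that would be sent to the nodes (sketch only).

  """
  if name_field in output_fields:
    fields = list(output_fields)
  else:
    fields = [name_field] + list(output_fields)
  return [field for field in fields if field not in local_fields]
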

    
3599

    
3600
class LUModifyNodeStorage(NoHooksLU):
3601
  """Logical unit for modifying a storage volume on a node.
3602

3603
  """
3604
  _OP_PARAMS = [
3605
    _PNodeName,
3606
    ("storage_type", _NoDefault, _CheckStorageType),
3607
    ("name", _NoDefault, _TNonEmptyString),
3608
    ("changes", _NoDefault, _TDict),
3609
    ]
3610
  REQ_BGL = False
3611

    
3612
  def CheckArguments(self):
3613
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3614

    
3615
    storage_type = self.op.storage_type
3616

    
3617
    try:
3618
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
3619
    except KeyError:
3620
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
3621
                                 " modified" % storage_type,
3622
                                 errors.ECODE_INVAL)
3623

    
3624
    diff = set(self.op.changes.keys()) - modifiable
3625
    if diff:
3626
      raise errors.OpPrereqError("The following fields can not be modified for"
3627
                                 " storage units of type '%s': %r" %
3628
                                 (storage_type, list(diff)),
3629
                                 errors.ECODE_INVAL)
3630

    
3631
  def ExpandNames(self):
3632
    self.needed_locks = {
3633
      locking.LEVEL_NODE: self.op.node_name,
3634
      }
3635

    
3636
  def Exec(self, feedback_fn):
3637
    """Computes the list of nodes and their attributes.
3638

3639
    """
3640
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3641
    result = self.rpc.call_storage_modify(self.op.node_name,
3642
                                          self.op.storage_type, st_args,
3643
                                          self.op.name, self.op.changes)
3644
    result.Raise("Failed to modify storage unit '%s' on %s" %
3645
                 (self.op.name, self.op.node_name))
3646

    
3647

    
3648
class LUAddNode(LogicalUnit):
3649
  """Logical unit for adding node to the cluster.
3650

3651
  """
3652
  HPATH = "node-add"
3653
  HTYPE = constants.HTYPE_NODE
3654
  _OP_PARAMS = [
3655
    _PNodeName,
3656
    ("primary_ip", None, _NoType),
3657
    ("secondary_ip", None, _TMaybeString),
3658
    ("readd", False, _TBool),
3659
    ]
3660

    
3661
  def CheckArguments(self):
3662
    # validate/normalize the node name
3663
    self.op.node_name = netutils.HostInfo.NormalizeName(self.op.node_name)
3664

    
3665
  def BuildHooksEnv(self):
3666
    """Build hooks env.
3667

3668
    This will run on all nodes before, and on all nodes + the new node after.
3669

3670
    """
3671
    env = {
3672
      "OP_TARGET": self.op.node_name,
3673
      "NODE_NAME": self.op.node_name,
3674
      "NODE_PIP": self.op.primary_ip,
3675
      "NODE_SIP": self.op.secondary_ip,
3676
      }
3677
    nodes_0 = self.cfg.GetNodeList()
3678
    nodes_1 = nodes_0 + [self.op.node_name, ]
3679
    return env, nodes_0, nodes_1
3680

    
3681
  def CheckPrereq(self):
3682
    """Check prerequisites.
3683

3684
    This checks:
3685
     - the new node is not already in the config
3686
     - it is resolvable
3687
     - its parameters (single/dual homed) matches the cluster
3688

3689
    Any errors are signaled by raising errors.OpPrereqError.
3690

3691
    """
3692
    node_name = self.op.node_name
3693
    cfg = self.cfg
3694

    
3695
    dns_data = netutils.GetHostInfo(node_name)
3696

    
3697
    node = dns_data.name
3698
    primary_ip = self.op.primary_ip = dns_data.ip
3699
    if self.op.secondary_ip is None:
3700
      self.op.secondary_ip = primary_ip
3701
    if not netutils.IsValidIP4(self.op.secondary_ip):
3702
      raise errors.OpPrereqError("Invalid secondary IP given",
3703
                                 errors.ECODE_INVAL)
3704
    secondary_ip = self.op.secondary_ip
3705

    
3706
    node_list = cfg.GetNodeList()
3707
    if not self.op.readd and node in node_list:
3708
      raise errors.OpPrereqError("Node %s is already in the configuration" %
3709
                                 node, errors.ECODE_EXISTS)
3710
    elif self.op.readd and node not in node_list:
3711
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
3712
                                 errors.ECODE_NOENT)
3713

    
3714
    self.changed_primary_ip = False
3715

    
3716
    for existing_node_name in node_list:
3717
      existing_node = cfg.GetNodeInfo(existing_node_name)
3718

    
3719
      if self.op.readd and node == existing_node_name:
3720
        if existing_node.secondary_ip != secondary_ip:
3721
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
3722
                                     " address configuration as before",
3723
                                     errors.ECODE_INVAL)
3724
        if existing_node.primary_ip != primary_ip:
3725
          self.changed_primary_ip = True
3726

    
3727
        continue
3728

    
3729
      if (existing_node.primary_ip == primary_ip or
3730
          existing_node.secondary_ip == primary_ip or
3731
          existing_node.primary_ip == secondary_ip or
3732
          existing_node.secondary_ip == secondary_ip):
3733
        raise errors.OpPrereqError("New node ip address(es) conflict with"
3734
                                   " existing node %s" % existing_node.name,
3735
                                   errors.ECODE_NOTUNIQUE)
3736

    
3737
    # check that the type of the node (single versus dual homed) is the
3738
    # same as for the master
3739
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
3740
    master_singlehomed = myself.secondary_ip == myself.primary_ip
3741
    newbie_singlehomed = secondary_ip == primary_ip
3742
    if master_singlehomed != newbie_singlehomed:
3743
      if master_singlehomed:
3744
        raise errors.OpPrereqError("The master has no private ip but the"
3745
                                   " new node has one",
3746
                                   errors.ECODE_INVAL)
3747
      else:
3748
        raise errors.OpPrereqError("The master has a private ip but the"
3749
                                   " new node doesn't have one",
3750
                                   errors.ECODE_INVAL)
3751

    
3752
    # checks reachability
3753
    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
3754
      raise errors.OpPrereqError("Node not reachable by ping",
3755
                                 errors.ECODE_ENVIRON)
3756

    
3757
    if not newbie_singlehomed:
3758
      # check reachability from my secondary ip to newbie's secondary ip
3759
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
3760
                           source=myself.secondary_ip):
3761
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
3762
                                   " based ping to noded port",
3763
                                   errors.ECODE_ENVIRON)
3764

    
3765
    if self.op.readd:
3766
      exceptions = [node]
3767
    else:
3768
      exceptions = []
3769

    
3770
    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
3771

    
3772
    if self.op.readd:
3773
      self.new_node = self.cfg.GetNodeInfo(node)
3774
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
3775
    else:
3776
      self.new_node = objects.Node(name=node,
3777
                                   primary_ip=primary_ip,
3778
                                   secondary_ip=secondary_ip,
3779
                                   master_candidate=self.master_candidate,
3780
                                   offline=False, drained=False)
3781

    
3782
  def Exec(self, feedback_fn):
3783
    """Adds the new node to the cluster.
3784

3785
    """
3786
    new_node = self.new_node
3787
    node = new_node.name
3788

    
3789
    # for re-adds, reset the offline/drained/master-candidate flags;
3790
    # we need to reset here, otherwise offline would prevent RPC calls
3791
    # later in the procedure; this also means that if the re-add
3792
    # fails, we are left with a non-offlined, broken node
3793
    if self.op.readd:
3794
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
3795
      self.LogInfo("Readding a node, the offline/drained flags were reset")
3796
      # if we demote the node, we do cleanup later in the procedure
3797
      new_node.master_candidate = self.master_candidate
3798
      if self.changed_primary_ip:
3799
        new_node.primary_ip = self.op.primary_ip
3800

    
3801
    # notify the user about any possible mc promotion
3802
    if new_node.master_candidate:
3803
      self.LogInfo("Node will be a master candidate")
3804

    
3805
    # check connectivity
3806
    result = self.rpc.call_version([node])[node]
3807
    result.Raise("Can't get version information from node %s" % node)
3808
    if constants.PROTOCOL_VERSION == result.payload:
3809
      logging.info("Communication to node %s fine, sw version %s match",
3810
                   node, result.payload)
3811
    else:
3812
      raise errors.OpExecError("Version mismatch master version %s,"
3813
                               " node version %s" %
3814
                               (constants.PROTOCOL_VERSION, result.payload))
3815

    
3816
    # setup ssh on node
3817
    if self.cfg.GetClusterInfo().modify_ssh_setup:
3818
      logging.info("Copy ssh key to node %s", node)
3819
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
3820
      keyarray = []
3821
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
3822
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
3823
                  priv_key, pub_key]
3824

    
3825
      for i in keyfiles:
3826
        keyarray.append(utils.ReadFile(i))
3827

    
3828
      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
3829
                                      keyarray[2], keyarray[3], keyarray[4],
3830
                                      keyarray[5])
3831
      result.Raise("Cannot transfer ssh keys to the new node")
3832

    
3833
    # Add node to our /etc/hosts, and add key to known_hosts
3834
    if self.cfg.GetClusterInfo().modify_etc_hosts:
3835
      # FIXME: this should be done via an rpc call to node daemon
3836
      utils.AddHostToEtcHosts(new_node.name)
3837

    
3838
    if new_node.secondary_ip != new_node.primary_ip:
3839
      result = self.rpc.call_node_has_ip_address(new_node.name,
3840
                                                 new_node.secondary_ip)
3841
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
3842
                   prereq=True, ecode=errors.ECODE_ENVIRON)
3843
      if not result.payload:
3844
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
3845
                                 " you gave (%s). Please fix and re-run this"
3846
                                 " command." % new_node.secondary_ip)
3847

    
3848
    node_verify_list = [self.cfg.GetMasterNode()]
3849
    node_verify_param = {
3850
      constants.NV_NODELIST: [node],
3851
      # TODO: do a node-net-test as well?
3852
    }
3853

    
3854
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
3855
                                       self.cfg.GetClusterName())
3856
    for verifier in node_verify_list:
3857
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
3858
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
3859
      if nl_payload:
3860
        for failed in nl_payload:
3861
          feedback_fn("ssh/hostname verification failed"
3862
                      " (checking from %s): %s" %
3863
                      (verifier, nl_payload[failed]))
3864
        raise errors.OpExecError("ssh/hostname verification failed.")
3865

    
3866
    if self.op.readd:
3867
      _RedistributeAncillaryFiles(self)
3868
      self.context.ReaddNode(new_node)
3869
      # make sure we redistribute the config
3870
      self.cfg.Update(new_node, feedback_fn)
3871
      # and make sure the new node will not have old files around
3872
      if not new_node.master_candidate:
3873
        result = self.rpc.call_node_demote_from_mc(new_node.name)
3874
        msg = result.fail_msg
3875
        if msg:
3876
          self.LogWarning("Node failed to demote itself from master"
3877
                          " candidate status: %s" % msg)
3878
    else:
3879
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
3880
      self.context.AddNode(new_node, self.proc.GetECId())
3881
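
# Stand-alone sketch (illustration only) of the single- versus dual-homed
# consistency rule enforced in LUAddNode.CheckPrereq above: a new node may
# only have a separate secondary IP if the master has one too, and the other
# way around.
def _ExampleHomingMatches(master_primary, master_secondary,
                          new_primary, new_secondary):
  """Return True if the new node's homing matches the master's (sketch).

  """
  master_singlehomed = master_secondary == master_primary
  newbie_singlehomed = new_secondary == new_primary
  return master_singlehomed == newbie_singlehomed
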

    
3882

    
3883
class LUSetNodeParams(LogicalUnit):
3884
  """Modifies the parameters of a node.
3885

3886
  """
3887
  HPATH = "node-modify"
3888
  HTYPE = constants.HTYPE_NODE
3889
  _OP_PARAMS = [
3890
    _PNodeName,
3891
    ("master_candidate", None, _TMaybeBool),
3892
    ("offline", None, _TMaybeBool),
3893
    ("drained", None, _TMaybeBool),
3894
    ("auto_promote", False, _TBool),
3895
    _PForce,
3896
    ]
3897
  REQ_BGL = False
3898

    
3899
  def CheckArguments(self):
3900
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3901
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
3902
    if all_mods.count(None) == 3:
3903
      raise errors.OpPrereqError("Please pass at least one modification",
3904
                                 errors.ECODE_INVAL)
3905
    if all_mods.count(True) > 1:
3906
      raise errors.OpPrereqError("Can't set the node into more than one"
3907
                                 " state at the same time",
3908
                                 errors.ECODE_INVAL)
3909

    
3910
    # Boolean value that tells us whether we're offlining or draining the node
3911
    self.offline_or_drain = (self.op.offline == True or
3912
                             self.op.drained == True)
3913
    self.deoffline_or_drain = (self.op.offline == False or
3914
                               self.op.drained == False)
3915
    self.might_demote = (self.op.master_candidate == False or
3916
                         self.offline_or_drain)
3917

    
3918
    self.lock_all = self.op.auto_promote and self.might_demote
3919

    
3920

    
3921
  def ExpandNames(self):
3922
    if self.lock_all:
3923
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
3924
    else:
3925
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
3926

    
3927
  def BuildHooksEnv(self):
3928
    """Build hooks env.
3929

3930
    This runs on the master node.
3931

3932
    """
3933
    env = {
3934
      "OP_TARGET": self.op.node_name,
3935
      "MASTER_CANDIDATE": str(self.op.master_candidate),
3936
      "OFFLINE": str(self.op.offline),
3937
      "DRAINED": str(self.op.drained),
3938
      }
3939
    nl = [self.cfg.GetMasterNode(),
3940
          self.op.node_name]
3941
    return env, nl, nl
3942

    
3943
  def CheckPrereq(self):
3944
    """Check prerequisites.
3945

3946
    This only checks the instance list against the existing names.
3947

3948
    """
3949
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
3950

    
3951
    if (self.op.master_candidate is not None or
3952
        self.op.drained is not None or
3953
        self.op.offline is not None):
3954
      # we can't change the master's node flags
3955
      if self.op.node_name == self.cfg.GetMasterNode():
3956
        raise errors.OpPrereqError("The master role can be changed"
3957
                                   " only via masterfailover",
3958
                                   errors.ECODE_INVAL)
3959

    
3960

    
3961
    if node.master_candidate and self.might_demote and not self.lock_all:
3962
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
3963
      # check if after removing the current node, we're missing master
3964
      # candidates
3965
      (mc_remaining, mc_should, _) = \
3966
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
3967
      if mc_remaining < mc_should:
3968
        raise errors.OpPrereqError("Not enough master candidates, please"
3969
                                   " pass auto_promote to allow promotion",
3970
                                   errors.ECODE_INVAL)
3971

    
3972
    if (self.op.master_candidate == True and
3973
        ((node.offline and not self.op.offline == False) or
3974
         (node.drained and not self.op.drained == False))):
3975
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
3976
                                 " to master_candidate" % node.name,
3977
                                 errors.ECODE_INVAL)
3978

    
3979
    # If we're being deofflined/drained, we'll MC ourself if needed
3980
    if (self.deoffline_or_drain and not self.offline_or_drain and not
3981
        self.op.master_candidate == True and not node.master_candidate):
3982
      self.op.master_candidate = _DecideSelfPromotion(self)
3983
      if self.op.master_candidate:
3984
        self.LogInfo("Autopromoting node to master candidate")
3985

    
3986
    return
3987

    
3988
  def Exec(self, feedback_fn):
3989
    """Modifies a node.
3990

3991
    """
3992
    node = self.node
3993

    
3994
    result = []
3995
    changed_mc = False
3996

    
3997
    if self.op.offline is not None:
3998
      node.offline = self.op.offline
3999
      result.append(("offline", str(self.op.offline)))
4000
      if self.op.offline == True:
4001
        if node.master_candidate:
4002
          node.master_candidate = False
4003
          changed_mc = True
4004
          result.append(("master_candidate", "auto-demotion due to offline"))
4005
        if node.drained:
4006
          node.drained = False
4007
          result.append(("drained", "clear drained status due to offline"))
4008

    
4009
    if self.op.master_candidate is not None:
4010
      node.master_candidate = self.op.master_candidate
4011
      changed_mc = True
4012
      result.append(("master_candidate", str(self.op.master_candidate)))
4013
      if self.op.master_candidate == False:
4014
        rrc = self.rpc.call_node_demote_from_mc(node.name)
4015
        msg = rrc.fail_msg
4016
        if msg:
4017
          self.LogWarning("Node failed to demote itself: %s" % msg)
4018

    
4019
    if self.op.drained is not None:
4020
      node.drained = self.op.drained
4021
      result.append(("drained", str(self.op.drained)))
4022
      if self.op.drained == True:
4023
        if node.master_candidate:
4024
          node.master_candidate = False
4025
          changed_mc = True
4026
          result.append(("master_candidate", "auto-demotion due to drain"))
4027
          rrc = self.rpc.call_node_demote_from_mc(node.name)
4028
          msg = rrc.fail_msg
4029
          if msg:
4030
            self.LogWarning("Node failed to demote itself: %s" % msg)
4031
        if node.offline:
4032
          node.offline = False
4033
          result.append(("offline", "clear offline status due to drain"))
4034

    
4035
    # we locked all nodes, we adjust the CP before updating this node
4036
    if self.lock_all:
4037
      _AdjustCandidatePool(self, [node.name])
4038

    
4039
    # this will trigger configuration file update, if needed
4040
    self.cfg.Update(node, feedback_fn)
4041

    
4042
    # this will trigger job queue propagation or cleanup
4043
    if changed_mc:
4044
      self.context.ReaddNode(node)
4045

    
4046
    return result
4047
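
# Sketch (not used by LUSetNodeParams above) of the argument rules its
# CheckArguments applies to the three node flags: at least one of them must
# be given, and at most one may be set to True in the same request.
def _ExampleValidNodeFlagRequest(offline, drained, master_candidate):
  """Return True if this flag combination would be accepted (sketch only).

  """
  all_mods = [offline, drained, master_candidate]
  if all_mods.count(None) == 3:
    return False
  return all_mods.count(True) <= 1
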

    
4048

    
4049
class LUPowercycleNode(NoHooksLU):
4050
  """Powercycles a node.
4051

4052
  """
4053
  _OP_PARAMS = [
4054
    _PNodeName,
4055
    _PForce,
4056
    ]
4057
  REQ_BGL = False
4058

    
4059
  def CheckArguments(self):
4060
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4061
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
4062
      raise errors.OpPrereqError("The node is the master and the force"
4063
                                 " parameter was not set",
4064
                                 errors.ECODE_INVAL)
4065

    
4066
  def ExpandNames(self):
4067
    """Locking for PowercycleNode.
4068

4069
    This is a last-resort option and shouldn't block on other
4070
    jobs. Therefore, we grab no locks.
4071

4072
    """
4073
    self.needed_locks = {}
4074

    
4075
  def Exec(self, feedback_fn):
4076
    """Reboots a node.
4077

4078
    """
4079
    result = self.rpc.call_node_powercycle(self.op.node_name,
4080
                                           self.cfg.GetHypervisorType())
4081
    result.Raise("Failed to schedule the reboot")
4082
    return result.payload
4083

    
4084

    
4085
class LUQueryClusterInfo(NoHooksLU):
4086
  """Query cluster configuration.
4087

4088
  """
4089
  REQ_BGL = False
4090

    
4091
  def ExpandNames(self):
4092
    self.needed_locks = {}
4093

    
4094
  def Exec(self, feedback_fn):
4095
    """Return cluster config.
4096

4097
    """
4098
    cluster = self.cfg.GetClusterInfo()
4099
    os_hvp = {}
4100

    
4101
    # Filter just for enabled hypervisors
4102
    for os_name, hv_dict in cluster.os_hvp.items():
4103
      os_hvp[os_name] = {}
4104
      for hv_name, hv_params in hv_dict.items():
4105
        if hv_name in cluster.enabled_hypervisors:
4106
          os_hvp[os_name][hv_name] = hv_params
4107

    
4108
    result = {
4109
      "software_version": constants.RELEASE_VERSION,
4110
      "protocol_version": constants.PROTOCOL_VERSION,
4111
      "config_version": constants.CONFIG_VERSION,
4112
      "os_api_version": max(constants.OS_API_VERSIONS),
4113
      "export_version": constants.EXPORT_VERSION,
4114
      "architecture": (platform.architecture()[0], platform.machine()),
4115
      "name": cluster.cluster_name,
4116
      "master": cluster.master_node,
4117
      "default_hypervisor": cluster.enabled_hypervisors[0],
4118
      "enabled_hypervisors": cluster.enabled_hypervisors,
4119
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
4120
                        for hypervisor_name in cluster.enabled_hypervisors]),
4121
      "os_hvp": os_hvp,
4122
      "beparams": cluster.beparams,
4123
      "osparams": cluster.osparams,
4124
      "nicparams": cluster.nicparams,
4125
      "candidate_pool_size": cluster.candidate_pool_size,
4126
      "master_netdev": cluster.master_netdev,
4127
      "volume_group_name": cluster.volume_group_name,
4128
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
4129
      "file_storage_dir": cluster.file_storage_dir,
4130
      "maintain_node_health": cluster.maintain_node_health,
4131
      "ctime": cluster.ctime,
4132
      "mtime": cluster.mtime,
4133
      "uuid": cluster.uuid,
4134
      "tags": list(cluster.GetTags()),
4135
      "uid_pool": cluster.uid_pool,
4136
      "default_iallocator": cluster.default_iallocator,
4137
      }
4138

    
4139
    return result
4140
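
# Minimal sketch of the os_hvp filtering done in LUQueryClusterInfo.Exec
# above: only settings for hypervisors that are actually enabled on the
# cluster are reported back to the client.
def _ExampleFilterOsHvp(os_hvp, enabled_hypervisors):
  """Return os_hvp restricted to the enabled hypervisors (sketch only).

  """
  result = {}
  for os_name, hv_dict in os_hvp.items():
    result[os_name] = dict((hv_name, hv_params)
                           for (hv_name, hv_params) in hv_dict.items()
                           if hv_name in enabled_hypervisors)
  return result
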

    
4141

    
4142
class LUQueryConfigValues(NoHooksLU):
4143
  """Return configuration values.
4144

4145
  """
4146
  _OP_PARAMS = [_POutputFields]
4147
  REQ_BGL = False
4148
  _FIELDS_DYNAMIC = utils.FieldSet()
4149
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
4150
                                  "watcher_pause")
4151

    
4152
  def CheckArguments(self):
4153
    _CheckOutputFields(static=self._FIELDS_STATIC,
4154
                       dynamic=self._FIELDS_DYNAMIC,
4155
                       selected=self.op.output_fields)
4156

    
4157
  def ExpandNames(self):
4158
    self.needed_locks = {}
4159

    
4160
  def Exec(self, feedback_fn):
4161
    """Dump a representation of the cluster config to the standard output.
4162

4163
    """
4164
    values = []
4165
    for field in self.op.output_fields:
4166
      if field == "cluster_name":
4167
        entry = self.cfg.GetClusterName()
4168
      elif field == "master_node":
4169
        entry = self.cfg.GetMasterNode()
4170
      elif field == "drain_flag":
4171
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
4172
      elif field == "watcher_pause":
4173
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
4174
      else:
4175
        raise errors.ParameterError(field)
4176
      values.append(entry)
4177
    return values
4178

    
4179

    
4180
class LUActivateInstanceDisks(NoHooksLU):
4181
  """Bring up an instance's disks.
4182

4183
  """
4184
  _OP_PARAMS = [
4185
    _PInstanceName,
4186
    ("ignore_size", False, _TBool),
4187
    ]
4188
  REQ_BGL = False
4189

    
4190
  def ExpandNames(self):
4191
    self._ExpandAndLockInstance()
4192
    self.needed_locks[locking.LEVEL_NODE] = []
4193
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4194

    
4195
  def DeclareLocks(self, level):
4196
    if level == locking.LEVEL_NODE:
4197
      self._LockInstancesNodes()
4198

    
4199
  def CheckPrereq(self):
4200
    """Check prerequisites.
4201

4202
    This checks that the instance is in the cluster.
4203

4204
    """
4205
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4206
    assert self.instance is not None, \
4207
      "Cannot retrieve locked instance %s" % self.op.instance_name
4208
    _CheckNodeOnline(self, self.instance.primary_node)
4209

    
4210
  def Exec(self, feedback_fn):
4211
    """Activate the disks.
4212

4213
    """
4214
    disks_ok, disks_info = \
4215
              _AssembleInstanceDisks(self, self.instance,
4216
                                     ignore_size=self.op.ignore_size)
4217
    if not disks_ok:
4218
      raise errors.OpExecError("Cannot activate block devices")
4219

    
4220
    return disks_info
4221

    
4222

    
4223
def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
4224
                           ignore_size=False):
4225
  """Prepare the block devices for an instance.
4226

4227
  This sets up the block devices on all nodes.
4228

4229
  @type lu: L{LogicalUnit}
4230
  @param lu: the logical unit on whose behalf we execute
4231
  @type instance: L{objects.Instance}
4232
  @param instance: the instance for whose disks we assemble
4233
  @type disks: list of L{objects.Disk} or None
4234
  @param disks: which disks to assemble (or all, if None)
4235
  @type ignore_secondaries: boolean
4236
  @param ignore_secondaries: if true, errors on secondary nodes
4237
      won't result in an error return from the function
4238
  @type ignore_size: boolean
4239
  @param ignore_size: if true, the current known size of the disk
4240
      will not be used during the disk activation, useful for cases
4241
      when the size is wrong
4242
  @return: False if the operation failed, otherwise a list of
4243
      (host, instance_visible_name, node_visible_name)
4244
      with the mapping from node devices to instance devices
4245

4246
  """
4247
  device_info = []
4248
  disks_ok = True
4249
  iname = instance.name
4250
  disks = _ExpandCheckDisks(instance, disks)
4251

    
4252
  # With the two-pass mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking has occurred, but we do not eliminate it
4255

    
4256
  # The proper fix would be to wait (with some limits) until the
4257
  # connection has been made and drbd transitions from WFConnection
4258
  # into any other network-connected state (Connected, SyncTarget,
4259
  # SyncSource, etc.)
4260

    
4261
  # 1st pass, assemble on all nodes in secondary mode
4262
  for inst_disk in disks:
4263
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4264
      if ignore_size:
4265
        node_disk = node_disk.Copy()
4266
        node_disk.UnsetSize()
4267
      lu.cfg.SetDiskID(node_disk, node)
4268
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
4269
      msg = result.fail_msg
4270
      if msg:
4271
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
4272
                           " (is_primary=False, pass=1): %s",
4273
                           inst_disk.iv_name, node, msg)
4274
        if not ignore_secondaries:
4275
          disks_ok = False
4276

    
4277
  # FIXME: race condition on drbd migration to primary
4278

    
4279
  # 2nd pass, do only the primary node
4280
  for inst_disk in disks:
4281
    dev_path = None
4282

    
4283
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
4284
      if node != instance.primary_node:
4285
        continue
4286
      if ignore_size:
4287
        node_disk = node_disk.Copy()
4288
        node_disk.UnsetSize()
4289
      lu.cfg.SetDiskID(node_disk, node)
4290
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
4291
      msg = result.fail_msg
4292
      if msg:
4293
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
4294
                           " (is_primary=True, pass=2): %s",
4295
                           inst_disk.iv_name, node, msg)
4296
        disks_ok = False
4297
      else:
4298
        dev_path = result.payload
4299

    
4300
    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
4301

    
4302
  # leave the disks configured for the primary node
4303
  # this is a workaround that would be fixed better by
4304
  # improving the logical/physical id handling
4305
  for disk in disks:
4306
    lu.cfg.SetDiskID(disk, instance.primary_node)
4307

    
4308
  return disks_ok, device_info
4309
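
# Compact illustration (not used by _AssembleInstanceDisks above) of its
# two-pass activation order: every node first assembles the device in
# secondary mode, and only afterwards is the primary node asked to bring the
# device up as primary, narrowing the window for the DRBD handshake race
# without removing it entirely.
def _ExampleTwoPassOrder(primary_node, all_nodes):
  """Return the (node, as_primary) call order used above (sketch only).

  """
  calls = [(node, False) for node in all_nodes]
  calls.extend([(node, True) for node in all_nodes if node == primary_node])
  return calls
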

    
4310

    
4311
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_PARAMS = [
    _PInstanceName,
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance, disks=None):
  """Shutdown block devices of an instance.

  This function checks that the instance is not running before calling
  _ShutdownInstanceDisks.

  """
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
  _ShutdownInstanceDisks(lu, instance, disks=disks)


def _ExpandCheckDisks(instance, disks):
  """Return the instance disks selected by the disks list

  @type disks: list of L{objects.Disk} or None
  @param disks: selected disks
  @rtype: list of L{objects.Disk}
  @return: selected instance disks to act on

  """
  if disks is None:
    return instance.disks
  else:
    if not set(disks).issubset(instance.disks):
      raise errors.ProgrammerError("Can only act on disks belonging to the"
                                   " target instance")
    return disks


def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are
  ignored.

  """
  all_result = True
  disks = _ExpandCheckDisks(instance, disks)

  for disk in disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
4417
  """Checks if a node has enough free memory.
4418

4419
  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.
4423

4424
  @type lu: C{LogicalUnit}
4425
  @param lu: a logical unit from which we get configuration data
4426
  @type node: C{str}
4427
  @param node: the node to check
4428
  @type reason: C{str}
4429
  @param reason: string to use in the error message
4430
  @type requested: C{int}
4431
  @param requested: the amount of memory in MiB to check for
4432
  @type hypervisor_name: C{str}
4433
  @param hypervisor_name: the hypervisor to ask for memory stats
4434
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
4435
      we cannot check the node
4436

4437
  """
4438
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
4439
  nodeinfo[node].Raise("Can't get data from node %s" % node,
4440
                       prereq=True, ecode=errors.ECODE_ENVIRON)
4441
  free_mem = nodeinfo[node].payload.get('memory_free', None)
4442
  if not isinstance(free_mem, int):
4443
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
4444
                               " was '%s'" % (node, free_mem),
4445
                               errors.ECODE_ENVIRON)
4446
  if requested > free_mem:
4447
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
4448
                               " needed %s MiB, available %s MiB" %
4449
                               (node, reason, requested, free_mem),
4450
                               errors.ECODE_NORES)
4451

    
4452

    
4453
def _CheckNodesFreeDisk(lu, nodenames, requested):
4454
  """Checks if nodes have enough free disk space in the default VG.
4455

4456
  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.
4460

4461
  @type lu: C{LogicalUnit}
4462
  @param lu: a logical unit from which we get configuration data
4463
  @type nodenames: C{list}
4464
  @param nodenames: the list of node names to check
4465
  @type requested: C{int}
4466
  @param requested: the amount of disk in MiB to check for
4467
  @raise errors.OpPrereqError: if the node doesn't have enough disk, or
4468
      we cannot check the node
4469

4470
  """
4471
  nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
4472
                                   lu.cfg.GetHypervisorType())
4473
  for node in nodenames:
4474
    info = nodeinfo[node]
4475
    info.Raise("Cannot get current information from node %s" % node,
4476
               prereq=True, ecode=errors.ECODE_ENVIRON)
4477
    vg_free = info.payload.get("vg_free", None)
4478
    if not isinstance(vg_free, int):
4479
      raise errors.OpPrereqError("Can't compute free disk space on node %s,"
4480
                                 " result was '%s'" % (node, vg_free),
4481
                                 errors.ECODE_ENVIRON)
4482
    if requested > vg_free:
4483
      raise errors.OpPrereqError("Not enough disk space on target node %s:"
4484
                                 " required %d MiB, available %d MiB" %
4485
                                 (node, requested, vg_free),
4486
                                 errors.ECODE_NORES)
4487

    
4488

    
4489
class LUStartupInstance(LogicalUnit):
4490
  """Starts an instance.
4491

4492
  """
4493
  HPATH = "instance-start"
4494
  HTYPE = constants.HTYPE_INSTANCE
4495
  _OP_PARAMS = [
4496
    _PInstanceName,
4497
    _PForce,
4498
    ("hvparams", _EmptyDict, _TDict),
4499
    ("beparams", _EmptyDict, _TDict),
4500
    ]
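  # Note (illustrative): each _OP_PARAMS entry is a (name, default, check)
  # triple; for example ("hvparams", _EmptyDict, _TDict) declares an optional
  # "hvparams" opcode parameter that defaults to an empty dict and must pass
  # the _TDict check.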
4501
  REQ_BGL = False
4502

    
4503
  def CheckArguments(self):
4504
    # extra beparams
4505
    if self.op.beparams:
4506
      # fill the beparams dict
4507
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4508

    
4509
  def ExpandNames(self):
4510
    self._ExpandAndLockInstance()
4511

    
4512
  def BuildHooksEnv(self):
4513
    """Build hooks env.
4514

4515
    This runs on master, primary and secondary nodes of the instance.
4516

4517
    """
4518
    env = {
4519
      "FORCE": self.op.force,
4520
      }
4521
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4522
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4523
    return env, nl, nl
4524

    
4525
  def CheckPrereq(self):
4526
    """Check prerequisites.
4527

4528
    This checks that the instance is in the cluster.
4529

4530
    """
4531
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4532
    assert self.instance is not None, \
4533
      "Cannot retrieve locked instance %s" % self.op.instance_name
4534

    
4535
    # extra hvparams
4536
    if self.op.hvparams:
4537
      # check hypervisor parameter syntax (locally)
4538
      cluster = self.cfg.GetClusterInfo()
4539
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4540
      filled_hvp = cluster.FillHV(instance)
4541
      filled_hvp.update(self.op.hvparams)
4542
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
4543
      hv_type.CheckParameterSyntax(filled_hvp)
4544
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
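      # Worked example (illustrative): if the cluster/instance-filled value is
      # {"kernel_path": "/boot/vmlinuz"} and the opcode passes
      # {"kernel_path": "/boot/vmlinuz-test"}, the update() above makes the
      # opcode value win for this start only; the stored instance
      # configuration is not modified.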
4545

    
4546
    _CheckNodeOnline(self, instance.primary_node)
4547

    
4548
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4549
    # check bridges existence
4550
    _CheckInstanceBridgesExist(self, instance)
4551

    
4552
    remote_info = self.rpc.call_instance_info(instance.primary_node,
4553
                                              instance.name,
4554
                                              instance.hypervisor)
4555
    remote_info.Raise("Error checking node %s" % instance.primary_node,
4556
                      prereq=True, ecode=errors.ECODE_ENVIRON)
4557
    if not remote_info.payload: # not running already
4558
      _CheckNodeFreeMemory(self, instance.primary_node,
4559
                           "starting instance %s" % instance.name,
4560
                           bep[constants.BE_MEMORY], instance.hypervisor)
4561

    
4562
  def Exec(self, feedback_fn):
4563
    """Start the instance.
4564

4565
    """
4566
    instance = self.instance
4567
    force = self.op.force
4568

    
4569
    self.cfg.MarkInstanceUp(instance.name)
4570

    
4571
    node_current = instance.primary_node
4572

    
4573
    _StartInstanceDisks(self, instance, force)
4574

    
4575
    result = self.rpc.call_instance_start(node_current, instance,
4576
                                          self.op.hvparams, self.op.beparams)
4577
    msg = result.fail_msg
4578
    if msg:
4579
      _ShutdownInstanceDisks(self, instance)
4580
      raise errors.OpExecError("Could not start instance: %s" % msg)
4581

    
4582

    
4583
class LURebootInstance(LogicalUnit):
4584
  """Reboot an instance.
4585

4586
  """
4587
  HPATH = "instance-reboot"
4588
  HTYPE = constants.HTYPE_INSTANCE
4589
  _OP_PARAMS = [
4590
    _PInstanceName,
4591
    ("ignore_secondaries", False, _TBool),
4592
    ("reboot_type", _NoDefault, _TElemOf(constants.REBOOT_TYPES)),
4593
    _PShutdownTimeout,
4594
    ]
4595
  REQ_BGL = False
4596

    
4597
  def ExpandNames(self):
4598
    self._ExpandAndLockInstance()
4599

    
4600
  def BuildHooksEnv(self):
4601
    """Build hooks env.
4602

4603
    This runs on master, primary and secondary nodes of the instance.
4604

4605
    """
4606
    env = {
4607
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
4608
      "REBOOT_TYPE": self.op.reboot_type,
4609
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
4610
      }
4611
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4612
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4613
    return env, nl, nl
4614

    
4615
  def CheckPrereq(self):
4616
    """Check prerequisites.
4617

4618
    This checks that the instance is in the cluster.
4619

4620
    """
4621
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4622
    assert self.instance is not None, \
4623
      "Cannot retrieve locked instance %s" % self.op.instance_name
4624

    
4625
    _CheckNodeOnline(self, instance.primary_node)
4626

    
4627
    # check bridges existence
4628
    _CheckInstanceBridgesExist(self, instance)
4629

    
4630
  def Exec(self, feedback_fn):
4631
    """Reboot the instance.
4632

4633
    """
4634
    instance = self.instance
4635
    ignore_secondaries = self.op.ignore_secondaries
4636
    reboot_type = self.op.reboot_type
4637

    
4638
    node_current = instance.primary_node
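    # Soft and hard reboots are handled by a single RPC to the primary node;
    # a full reboot is emulated below as an instance shutdown followed by a
    # disk restart and a fresh instance start.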
4639

    
4640
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
4641
                       constants.INSTANCE_REBOOT_HARD]:
4642
      for disk in instance.disks:
4643
        self.cfg.SetDiskID(disk, node_current)
4644
      result = self.rpc.call_instance_reboot(node_current, instance,
4645
                                             reboot_type,
4646
                                             self.op.shutdown_timeout)
4647
      result.Raise("Could not reboot instance")
4648
    else:
4649
      result = self.rpc.call_instance_shutdown(node_current, instance,
4650
                                               self.op.shutdown_timeout)
4651
      result.Raise("Could not shutdown instance for full reboot")
4652
      _ShutdownInstanceDisks(self, instance)
4653
      _StartInstanceDisks(self, instance, ignore_secondaries)
4654
      result = self.rpc.call_instance_start(node_current, instance, None, None)
4655
      msg = result.fail_msg
4656
      if msg:
4657
        _ShutdownInstanceDisks(self, instance)
4658
        raise errors.OpExecError("Could not start instance for"
4659
                                 " full reboot: %s" % msg)
4660

    
4661
    self.cfg.MarkInstanceUp(instance.name)
4662

    
4663

    
4664
class LUShutdownInstance(LogicalUnit):
4665
  """Shutdown an instance.
4666

4667
  """
4668
  HPATH = "instance-stop"
4669
  HTYPE = constants.HTYPE_INSTANCE
4670
  _OP_PARAMS = [
4671
    _PInstanceName,
4672
    ("timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, _TPositiveInt),
4673
    ]
4674
  REQ_BGL = False
4675

    
4676
  def ExpandNames(self):
4677
    self._ExpandAndLockInstance()
4678

    
4679
  def BuildHooksEnv(self):
4680
    """Build hooks env.
4681

4682
    This runs on master, primary and secondary nodes of the instance.
4683

4684
    """
4685
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4686
    env["TIMEOUT"] = self.op.timeout
4687
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4688
    return env, nl, nl
4689

    
4690
  def CheckPrereq(self):
4691
    """Check prerequisites.
4692

4693
    This checks that the instance is in the cluster.
4694

4695
    """
4696
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4697
    assert self.instance is not None, \
4698
      "Cannot retrieve locked instance %s" % self.op.instance_name
4699
    _CheckNodeOnline(self, self.instance.primary_node)
4700

    
4701
  def Exec(self, feedback_fn):
4702
    """Shutdown the instance.
4703

4704
    """
4705
    instance = self.instance
4706
    node_current = instance.primary_node
4707
    timeout = self.op.timeout
4708
    self.cfg.MarkInstanceDown(instance.name)
4709
    result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
4710
    msg = result.fail_msg
4711
    if msg:
4712
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
4713

    
4714
    _ShutdownInstanceDisks(self, instance)
4715

    
4716

    
4717
class LUReinstallInstance(LogicalUnit):
4718
  """Reinstall an instance.
4719

4720
  """
4721
  HPATH = "instance-reinstall"
4722
  HTYPE = constants.HTYPE_INSTANCE
4723
  _OP_PARAMS = [
4724
    _PInstanceName,
4725
    ("os_type", None, _TMaybeString),
4726
    ("force_variant", False, _TBool),
4727
    ]
4728
  REQ_BGL = False
4729

    
4730
  def ExpandNames(self):
4731
    self._ExpandAndLockInstance()
4732

    
4733
  def BuildHooksEnv(self):
4734
    """Build hooks env.
4735

4736
    This runs on master, primary and secondary nodes of the instance.
4737

4738
    """
4739
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4740
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4741
    return env, nl, nl
4742

    
4743
  def CheckPrereq(self):
4744
    """Check prerequisites.
4745

4746
    This checks that the instance is in the cluster and is not running.
4747

4748
    """
4749
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4750
    assert instance is not None, \
4751
      "Cannot retrieve locked instance %s" % self.op.instance_name
4752
    _CheckNodeOnline(self, instance.primary_node)
4753

    
4754
    if instance.disk_template == constants.DT_DISKLESS:
4755
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4756
                                 self.op.instance_name,
4757
                                 errors.ECODE_INVAL)
4758
    _CheckInstanceDown(self, instance, "cannot reinstall")
4759

    
4760
    if self.op.os_type is not None:
4761
      # OS verification
4762
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
4763
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
4764

    
4765
    self.instance = instance
4766

    
4767
  def Exec(self, feedback_fn):
4768
    """Reinstall the instance.
4769

4770
    """
4771
    inst = self.instance
4772

    
4773
    if self.op.os_type is not None:
4774
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
4775
      inst.os = self.op.os_type
4776
      self.cfg.Update(inst, feedback_fn)
4777

    
4778
    _StartInstanceDisks(self, inst, None)
4779
    try:
4780
      feedback_fn("Running the instance OS create scripts...")
4781
      # FIXME: pass debug option from opcode to backend
4782
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
4783
                                             self.op.debug_level)
4784
      result.Raise("Could not install OS for instance %s on node %s" %
4785
                   (inst.name, inst.primary_node))
4786
    finally:
4787
      _ShutdownInstanceDisks(self, inst)
4788

    
4789

    
4790
class LURecreateInstanceDisks(LogicalUnit):
4791
  """Recreate an instance's missing disks.
4792

4793
  """
4794
  HPATH = "instance-recreate-disks"
4795
  HTYPE = constants.HTYPE_INSTANCE
4796
  _OP_PARAMS = [
4797
    _PInstanceName,
4798
    ("disks", _EmptyList, _TListOf(_TPositiveInt)),
4799
    ]
4800
  REQ_BGL = False
4801

    
4802
  def ExpandNames(self):
4803
    self._ExpandAndLockInstance()
4804

    
4805
  def BuildHooksEnv(self):
4806
    """Build hooks env.
4807

4808
    This runs on master, primary and secondary nodes of the instance.
4809

4810
    """
4811
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4812
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4813
    return env, nl, nl
4814

    
4815
  def CheckPrereq(self):
4816
    """Check prerequisites.
4817

4818
    This checks that the instance is in the cluster and is not running.
4819

4820
    """
4821
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4822
    assert instance is not None, \
4823
      "Cannot retrieve locked instance %s" % self.op.instance_name
4824
    _CheckNodeOnline(self, instance.primary_node)
4825

    
4826
    if instance.disk_template == constants.DT_DISKLESS:
4827
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4828
                                 self.op.instance_name, errors.ECODE_INVAL)
4829
    _CheckInstanceDown(self, instance, "cannot recreate disks")
4830

    
4831
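    # An empty "disks" parameter means "recreate every disk"; otherwise each
    # requested index must refer to an existing disk of the instance.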
    if not self.op.disks:
4832
      self.op.disks = range(len(instance.disks))
4833
    else:
4834
      for idx in self.op.disks:
4835
        if idx >= len(instance.disks):
4836
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
4837
                                     errors.ECODE_INVAL)
4838

    
4839
    self.instance = instance
4840

    
4841
  def Exec(self, feedback_fn):
4842
    """Recreate the disks.
4843

4844
    """
4845
    to_skip = []
4846
    for idx, _ in enumerate(self.instance.disks):
4847
      if idx not in self.op.disks: # disk idx has not been passed in
4848
        to_skip.append(idx)
4849
        continue
4850

    
4851
    _CreateDisks(self, self.instance, to_skip=to_skip)
4852

    
4853

    
4854
class LURenameInstance(LogicalUnit):
4855
  """Rename an instance.
4856

4857
  """
4858
  HPATH = "instance-rename"
4859
  HTYPE = constants.HTYPE_INSTANCE
4860
  _OP_PARAMS = [
4861
    _PInstanceName,
4862
    ("new_name", _NoDefault, _TNonEmptyString),
4863
    ("ignore_ip", False, _TBool),
4864
    ("check_name", True, _TBool),
4865
    ]
4866

    
4867
  def BuildHooksEnv(self):
4868
    """Build hooks env.
4869

4870
    This runs on master, primary and secondary nodes of the instance.
4871

4872
    """
4873
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4874
    env["INSTANCE_NEW_NAME"] = self.op.new_name
4875
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4876
    return env, nl, nl
4877

    
4878
  def CheckPrereq(self):
4879
    """Check prerequisites.
4880

4881
    This checks that the instance is in the cluster and is not running.
4882

4883
    """
4884
    self.op.instance_name = _ExpandInstanceName(self.cfg,
4885
                                                self.op.instance_name)
4886
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4887
    assert instance is not None
4888
    _CheckNodeOnline(self, instance.primary_node)
4889
    _CheckInstanceDown(self, instance, "cannot rename")
4890
    self.instance = instance
4891

    
4892
    # new name verification
4893
    if self.op.check_name:
4894
      name_info = netutils.GetHostInfo(self.op.new_name)
4895
      self.op.new_name = name_info.name
4896

    
4897
    new_name = self.op.new_name
4898

    
4899
    instance_list = self.cfg.GetInstanceList()
4900
    if new_name in instance_list:
4901
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4902
                                 new_name, errors.ECODE_EXISTS)
4903

    
4904
    if not self.op.ignore_ip:
4905
      if netutils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
4906
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4907
                                   (name_info.ip, new_name),
4908
                                   errors.ECODE_NOTUNIQUE)
4909

    
4910
  def Exec(self, feedback_fn):
4911
    """Reinstall the instance.
4912

4913
    """
4914
    inst = self.instance
4915
    old_name = inst.name
4916

    
4917
    if inst.disk_template == constants.DT_FILE:
4918
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4919

    
4920
    self.cfg.RenameInstance(inst.name, self.op.new_name)
4921
    # Change the instance lock. This is definitely safe while we hold the BGL
4922
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
4923
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
4924

    
4925
    # re-read the instance from the configuration after rename
4926
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
4927

    
4928
    if inst.disk_template == constants.DT_FILE:
4929
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4930
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
4931
                                                     old_file_storage_dir,
4932
                                                     new_file_storage_dir)
4933
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
4934
                   " (but the instance has been renamed in Ganeti)" %
4935
                   (inst.primary_node, old_file_storage_dir,
4936
                    new_file_storage_dir))
4937

    
4938
    _StartInstanceDisks(self, inst, None)
4939
    try:
4940
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
4941
                                                 old_name, self.op.debug_level)
4942
      msg = result.fail_msg
4943
      if msg:
4944
        msg = ("Could not run OS rename script for instance %s on node %s"
4945
               " (but the instance has been renamed in Ganeti): %s" %
4946
               (inst.name, inst.primary_node, msg))
4947
        self.proc.LogWarning(msg)
4948
    finally:
4949
      _ShutdownInstanceDisks(self, inst)
4950

    
4951

    
4952
class LURemoveInstance(LogicalUnit):
4953
  """Remove an instance.
4954

4955
  """
4956
  HPATH = "instance-remove"
4957
  HTYPE = constants.HTYPE_INSTANCE
4958
  _OP_PARAMS = [
4959
    _PInstanceName,
4960
    ("ignore_failures", False, _TBool),
4961
    _PShutdownTimeout,
4962
    ]
4963
  REQ_BGL = False
4964

    
4965
  def ExpandNames(self):
4966
    self._ExpandAndLockInstance()
4967
    self.needed_locks[locking.LEVEL_NODE] = []
4968
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4969

    
4970
  def DeclareLocks(self, level):
4971
    if level == locking.LEVEL_NODE:
4972
      self._LockInstancesNodes()
4973

    
4974
  def BuildHooksEnv(self):
4975
    """Build hooks env.
4976

4977
    This runs on master, primary and secondary nodes of the instance.
4978

4979
    """
4980
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4981
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
4982
    nl = [self.cfg.GetMasterNode()]
4983
    nl_post = list(self.instance.all_nodes) + nl
4984
    return env, nl, nl_post
4985

    
4986
  def CheckPrereq(self):
4987
    """Check prerequisites.
4988

4989
    This checks that the instance is in the cluster.
4990

4991
    """
4992
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4993
    assert self.instance is not None, \
4994
      "Cannot retrieve locked instance %s" % self.op.instance_name
4995

    
4996
  def Exec(self, feedback_fn):
4997
    """Remove the instance.
4998

4999
    """
5000
    instance = self.instance
5001
    logging.info("Shutting down instance %s on node %s",
5002
                 instance.name, instance.primary_node)
5003

    
5004
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
5005
                                             self.op.shutdown_timeout)
5006
    msg = result.fail_msg
5007
    if msg:
5008
      if self.op.ignore_failures:
5009
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
5010
      else:
5011
        raise errors.OpExecError("Could not shutdown instance %s on"
5012
                                 " node %s: %s" %
5013
                                 (instance.name, instance.primary_node, msg))
5014

    
5015
    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
5016

    
5017

    
5018
def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
5019
  """Utility function to remove an instance.
5020

5021
  """
5022
  logging.info("Removing block devices for instance %s", instance.name)
5023

    
5024
  if not _RemoveDisks(lu, instance):
5025
    if not ignore_failures:
5026
      raise errors.OpExecError("Can't remove instance's disks")
5027
    feedback_fn("Warning: can't remove instance's disks")
5028

    
5029
  logging.info("Removing instance %s out of cluster config", instance.name)
5030

    
5031
  lu.cfg.RemoveInstance(instance.name)
5032

    
5033
  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
5034
    "Instance lock removal conflict"
5035

    
5036
  # Remove lock for the instance
5037
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
5038

    
5039

    
5040
class LUQueryInstances(NoHooksLU):
5041
  """Logical unit for querying instances.
5042

5043
  """
5044
  # pylint: disable-msg=W0142
5045
  _OP_PARAMS = [
5046
    ("output_fields", _NoDefault, _TListOf(_TNonEmptyString)),
5047
    ("names", _EmptyList, _TListOf(_TNonEmptyString)),
5048
    ("use_locking", False, _TBool),
5049
    ]
5050
  REQ_BGL = False
5051
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
5052
                    "serial_no", "ctime", "mtime", "uuid"]
5053
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
5054
                                    "admin_state",
5055
                                    "disk_template", "ip", "mac", "bridge",
5056
                                    "nic_mode", "nic_link",
5057
                                    "sda_size", "sdb_size", "vcpus", "tags",
5058
                                    "network_port", "beparams",
5059
                                    r"(disk)\.(size)/([0-9]+)",
5060
                                    r"(disk)\.(sizes)", "disk_usage",
5061
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
5062
                                    r"(nic)\.(bridge)/([0-9]+)",
5063
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
5064
                                    r"(disk|nic)\.(count)",
5065
                                    "hvparams",
5066
                                    ] + _SIMPLE_FIELDS +
5067
                                  ["hv/%s" % name
5068
                                   for name in constants.HVS_PARAMETERS
5069
                                   if name not in constants.HVC_GLOBALS] +
5070
                                  ["be/%s" % name
5071
                                   for name in constants.BES_PARAMETERS])
5072
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state",
5073
                                   "oper_ram",
5074
                                   "oper_vcpus",
5075
                                   "status")
5076

    
5077

    
5078
  def CheckArguments(self):
5079
    _CheckOutputFields(static=self._FIELDS_STATIC,
5080
                       dynamic=self._FIELDS_DYNAMIC,
5081
                       selected=self.op.output_fields)
5082

    
5083
  def ExpandNames(self):
5084
    self.needed_locks = {}
5085
    self.share_locks[locking.LEVEL_INSTANCE] = 1
5086
    self.share_locks[locking.LEVEL_NODE] = 1
5087

    
5088
    if self.op.names:
5089
      self.wanted = _GetWantedInstances(self, self.op.names)
5090
    else:
5091
      self.wanted = locking.ALL_SET
5092

    
5093
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
5094
    self.do_locking = self.do_node_query and self.op.use_locking
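    # Locking is only needed when live (node-side) data was requested and the
    # caller explicitly asked for it; a purely static query runs lock-free.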
5095
    if self.do_locking:
5096
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
5097
      self.needed_locks[locking.LEVEL_NODE] = []
5098
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5099

    
5100
  def DeclareLocks(self, level):
5101
    if level == locking.LEVEL_NODE and self.do_locking:
5102
      self._LockInstancesNodes()
5103

    
5104
  def Exec(self, feedback_fn):
5105
    """Computes the list of nodes and their attributes.
5106

5107
    """
5108
    # pylint: disable-msg=R0912
5109
    # way too many branches here
5110
    all_info = self.cfg.GetAllInstancesInfo()
5111
    if self.wanted == locking.ALL_SET:
5112
      # caller didn't specify instance names, so ordering is not important
5113
      if self.do_locking:
5114
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5115
      else:
5116
        instance_names = all_info.keys()
5117
      instance_names = utils.NiceSort(instance_names)
5118
    else:
5119
      # caller did specify names, so we must keep the ordering
5120
      if self.do_locking:
5121
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
5122
      else:
5123
        tgt_set = all_info.keys()
5124
      missing = set(self.wanted).difference(tgt_set)
5125
      if missing:
5126
        raise errors.OpExecError("Some instances were removed before"
5127
                                 " retrieving their data: %s" % missing)
5128
      instance_names = self.wanted
5129

    
5130
    instance_list = [all_info[iname] for iname in instance_names]
5131

    
5132
    # begin data gathering
5133

    
5134
    nodes = frozenset([inst.primary_node for inst in instance_list])
5135
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
5136

    
5137
    bad_nodes = []
5138
    off_nodes = []
5139
    if self.do_node_query:
5140
      live_data = {}
5141
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
5142
      for name in nodes:
5143
        result = node_data[name]
5144
        if result.offline:
5145
          # offline nodes will be in both lists
5146
          off_nodes.append(name)
5147
        if result.fail_msg:
5148
          bad_nodes.append(name)
5149
        else:
5150
          if result.payload:
5151
            live_data.update(result.payload)
5152
          # else no instance is alive
5153
    else:
5154
      live_data = dict([(name, {}) for name in instance_names])
5155

    
5156
    # end data gathering
5157

    
5158
    HVPREFIX = "hv/"
5159
    BEPREFIX = "be/"
5160
    output = []
5161
    cluster = self.cfg.GetClusterInfo()
5162
    for instance in instance_list:
5163
      iout = []
5164
      i_hv = cluster.FillHV(instance, skip_globals=True)
5165
      i_be = cluster.FillBE(instance)
5166
      i_nicp = [cluster.SimpleFillNIC(nic.nicparams) for nic in instance.nics]
5167
      for field in self.op.output_fields:
5168
        st_match = self._FIELDS_STATIC.Matches(field)
5169
        if field in self._SIMPLE_FIELDS:
5170
          val = getattr(instance, field)
5171
        elif field == "pnode":
5172
          val = instance.primary_node
5173
        elif field == "snodes":
5174
          val = list(instance.secondary_nodes)
5175
        elif field == "admin_state":
5176
          val = instance.admin_up
5177
        elif field == "oper_state":
5178
          if instance.primary_node in bad_nodes:
5179
            val = None
5180
          else:
5181
            val = bool(live_data.get(instance.name))
5182
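        # Summary (illustrative) of the "status" values computed below:
        #   primary node offline      -> ERROR_nodeoffline
        #   primary node unreachable  -> ERROR_nodedown
        #   running and admin up      -> running
        #   running and admin down    -> ERROR_up
        #   stopped and admin up      -> ERROR_down
        #   stopped and admin down    -> ADMIN_down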
        elif field == "status":
5183
          if instance.primary_node in off_nodes:
5184
            val = "ERROR_nodeoffline"
5185
          elif instance.primary_node in bad_nodes:
5186
            val = "ERROR_nodedown"
5187
          else:
5188
            running = bool(live_data.get(instance.name))
5189
            if running:
5190
              if instance.admin_up:
5191
                val = "running"
5192
              else:
5193
                val = "ERROR_up"
5194
            else:
5195
              if instance.admin_up:
5196
                val = "ERROR_down"
5197
              else:
5198
                val = "ADMIN_down"
5199
        elif field == "oper_ram":
5200
          if instance.primary_node in bad_nodes:
5201
            val = None
5202
          elif instance.name in live_data:
5203
            val = live_data[instance.name].get("memory", "?")
5204
          else:
5205
            val = "-"
5206
        elif field == "oper_vcpus":
5207
          if instance.primary_node in bad_nodes:
5208
            val = None
5209
          elif instance.name in live_data:
5210
            val = live_data[instance.name].get("vcpus", "?")
5211
          else:
5212
            val = "-"
5213
        elif field == "vcpus":
5214
          val = i_be[constants.BE_VCPUS]
5215
        elif field == "disk_template":
5216
          val = instance.disk_template
5217
        elif field == "ip":
5218
          if instance.nics:
5219
            val = instance.nics[0].ip
5220
          else:
5221
            val = None
5222
        elif field == "nic_mode":
5223
          if instance.nics:
5224
            val = i_nicp[0][constants.NIC_MODE]
5225
          else:
5226
            val = None
5227
        elif field == "nic_link":
5228
          if instance.nics:
5229
            val = i_nicp[0][constants.NIC_LINK]
5230
          else:
5231
            val = None
5232
        elif field == "bridge":
5233
          if (instance.nics and
5234
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
5235
            val = i_nicp[0][constants.NIC_LINK]
5236
          else:
5237
            val = None
5238
        elif field == "mac":
5239
          if instance.nics:
5240
            val = instance.nics[0].mac
5241
          else:
5242
            val = None
5243
        elif field == "sda_size" or field == "sdb_size":
5244
          idx = ord(field[2]) - ord('a')
5245
          try:
5246
            val = instance.FindDisk(idx).size
5247
          except errors.OpPrereqError:
5248
            val = None
5249
        elif field == "disk_usage": # total disk usage per node
5250
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
5251
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
5252
        elif field == "tags":
5253
          val = list(instance.GetTags())
5254
        elif field == "hvparams":
5255
          val = i_hv
5256
        elif (field.startswith(HVPREFIX) and
5257
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS and
5258
              field[len(HVPREFIX):] not in constants.HVC_GLOBALS):
5259
          val = i_hv.get(field[len(HVPREFIX):], None)
5260
        elif field == "beparams":
5261
          val = i_be
5262
        elif (field.startswith(BEPREFIX) and
5263
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
5264
          val = i_be.get(field[len(BEPREFIX):], None)
5265
        elif st_match and st_match.groups():
5266
          # matches a variable list
5267
          st_groups = st_match.groups()
5268
          if st_groups and st_groups[0] == "disk":
5269
            if st_groups[1] == "count":
5270
              val = len(instance.disks)
5271
            elif st_groups[1] == "sizes":
5272
              val = [disk.size for disk in instance.disks]
5273
            elif st_groups[1] == "size":
5274
              try:
5275
                val = instance.FindDisk(st_groups[2]).size
5276
              except errors.OpPrereqError:
5277
                val = None
5278
            else:
5279
              assert False, "Unhandled disk parameter"
5280
          elif st_groups[0] == "nic":
5281
            if st_groups[1] == "count":
5282
              val = len(instance.nics)
5283
            elif st_groups[1] == "macs":
5284
              val = [nic.mac for nic in instance.nics]
5285
            elif st_groups[1] == "ips":
5286
              val = [nic.ip for nic in instance.nics]
5287
            elif st_groups[1] == "modes":
5288
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
5289
            elif st_groups[1] == "links":
5290
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
5291
            elif st_groups[1] == "bridges":
5292
              val = []
5293
              for nicp in i_nicp:
5294
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
5295
                  val.append(nicp[constants.NIC_LINK])
5296
                else:
5297
                  val.append(None)
5298
            else:
5299
              # index-based item
5300
              nic_idx = int(st_groups[2])
5301
              if nic_idx >= len(instance.nics):
5302
                val = None
5303
              else:
5304
                if st_groups[1] == "mac":
5305
                  val = instance.nics[nic_idx].mac
5306
                elif st_groups[1] == "ip":
5307
                  val = instance.nics[nic_idx].ip
5308
                elif st_groups[1] == "mode":
5309
                  val = i_nicp[nic_idx][constants.NIC_MODE]
5310
                elif st_groups[1] == "link":
5311
                  val = i_nicp[nic_idx][constants.NIC_LINK]
5312
                elif st_groups[1] == "bridge":
5313
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
5314
                  if nic_mode == constants.NIC_MODE_BRIDGED:
5315
                    val = i_nicp[nic_idx][constants.NIC_LINK]
5316
                  else:
5317
                    val = None
5318
                else:
5319
                  assert False, "Unhandled NIC parameter"
5320
          else:
5321
            assert False, ("Declared but unhandled variable parameter '%s'" %
5322
                           field)
5323
        else:
5324
          assert False, "Declared but unhandled parameter '%s'" % field
5325
        iout.append(val)
5326
      output.append(iout)
5327

    
5328
    return output
5329

    
5330

    
5331
class LUFailoverInstance(LogicalUnit):
5332
  """Failover an instance.
5333

5334
  """
5335
  HPATH = "instance-failover"
5336
  HTYPE = constants.HTYPE_INSTANCE
5337
  _OP_PARAMS = [
5338
    _PInstanceName,
5339
    ("ignore_consistency", False, _TBool),
5340
    _PShutdownTimeout,
5341
    ]
5342
  REQ_BGL = False
5343

    
5344
  def ExpandNames(self):
5345
    self._ExpandAndLockInstance()
5346
    self.needed_locks[locking.LEVEL_NODE] = []
5347
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5348

    
5349
  def DeclareLocks(self, level):
5350
    if level == locking.LEVEL_NODE:
5351
      self._LockInstancesNodes()
5352

    
5353
  def BuildHooksEnv(self):
5354
    """Build hooks env.
5355

5356
    This runs on master, primary and secondary nodes of the instance.
5357

5358
    """
5359
    instance = self.instance
5360
    source_node = instance.primary_node
5361
    target_node = instance.secondary_nodes[0]
5362
    env = {
5363
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
5364
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5365
      "OLD_PRIMARY": source_node,
5366
      "OLD_SECONDARY": target_node,
5367
      "NEW_PRIMARY": target_node,
5368
      "NEW_SECONDARY": source_node,
5369
      }
5370
    env.update(_BuildInstanceHookEnvByObject(self, instance))
5371
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5372
    nl_post = list(nl)
5373
    nl_post.append(source_node)
5374
    return env, nl, nl_post
5375

    
5376
  def CheckPrereq(self):
5377
    """Check prerequisites.
5378

5379
    This checks that the instance is in the cluster.
5380

5381
    """
5382
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5383
    assert self.instance is not None, \
5384
      "Cannot retrieve locked instance %s" % self.op.instance_name
5385

    
5386
    bep = self.cfg.GetClusterInfo().FillBE(instance)
5387
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5388
      raise errors.OpPrereqError("Instance's disk layout is not"
5389
                                 " network mirrored, cannot failover.",
5390
                                 errors.ECODE_STATE)
5391

    
5392
    secondary_nodes = instance.secondary_nodes
5393
    if not secondary_nodes:
5394
      raise errors.ProgrammerError("no secondary node but using "
5395
                                   "a mirrored disk template")
5396

    
5397
    target_node = secondary_nodes[0]
5398
    _CheckNodeOnline(self, target_node)
5399
    _CheckNodeNotDrained(self, target_node)
5400
    if instance.admin_up:
5401
      # check memory requirements on the secondary node
5402
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5403
                           instance.name, bep[constants.BE_MEMORY],
5404
                           instance.hypervisor)
5405
    else:
5406
      self.LogInfo("Not checking memory on the secondary node as"
5407
                   " instance will not be started")
5408

    
5409
    # check bridge existence
5410
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5411

    
5412
  def Exec(self, feedback_fn):
5413
    """Failover an instance.
5414

5415
    The failover is done by shutting it down on its present node and
5416
    starting it on the secondary.
5417

5418
    """
5419
    instance = self.instance
5420

    
5421
    source_node = instance.primary_node
5422
    target_node = instance.secondary_nodes[0]
5423

    
5424
    if instance.admin_up:
5425
      feedback_fn("* checking disk consistency between source and target")
5426
      for dev in instance.disks:
5427
        # for drbd, these are drbd over lvm
5428
        if not _CheckDiskConsistency(self, dev, target_node, False):
5429
          if not self.op.ignore_consistency:
5430
            raise errors.OpExecError("Disk %s is degraded on target node,"
5431
                                     " aborting failover." % dev.iv_name)
5432
    else:
5433
      feedback_fn("* not checking disk consistency as instance is not running")
5434

    
5435
    feedback_fn("* shutting down instance on source node")
5436
    logging.info("Shutting down instance %s on node %s",
5437
                 instance.name, source_node)
5438

    
5439
    result = self.rpc.call_instance_shutdown(source_node, instance,
5440
                                             self.op.shutdown_timeout)
5441
    msg = result.fail_msg
5442
    if msg:
5443
      if self.op.ignore_consistency:
5444
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
5445
                             " Proceeding anyway. Please make sure node"
5446
                             " %s is down. Error details: %s",
5447
                             instance.name, source_node, source_node, msg)
5448
      else:
5449
        raise errors.OpExecError("Could not shutdown instance %s on"
5450
                                 " node %s: %s" %
5451
                                 (instance.name, source_node, msg))
5452

    
5453
    feedback_fn("* deactivating the instance's disks on source node")
5454
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
5455
      raise errors.OpExecError("Can't shut down the instance's disks.")
5456

    
5457
    instance.primary_node = target_node
5458
    # distribute new instance config to the other nodes
5459
    self.cfg.Update(instance, feedback_fn)
5460

    
5461
    # Only start the instance if it's marked as up
5462
    if instance.admin_up:
5463
      feedback_fn("* activating the instance's disks on target node")
5464
      logging.info("Starting instance %s on node %s",
5465
                   instance.name, target_node)
5466

    
5467
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
5468
                                           ignore_secondaries=True)
5469
      if not disks_ok:
5470
        _ShutdownInstanceDisks(self, instance)
5471
        raise errors.OpExecError("Can't activate the instance's disks")
5472

    
5473
      feedback_fn("* starting the instance on the target node")
5474
      result = self.rpc.call_instance_start(target_node, instance, None, None)
5475
      msg = result.fail_msg
5476
      if msg:
5477
        _ShutdownInstanceDisks(self, instance)
5478
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5479
                                 (instance.name, target_node, msg))
5480

    
5481

    
5482
class LUMigrateInstance(LogicalUnit):
5483
  """Migrate an instance.
5484

5485
  This is migration without shutting down, compared to the failover,
5486
  which is done with shutdown.
5487

5488
  """
5489
  HPATH = "instance-migrate"
5490
  HTYPE = constants.HTYPE_INSTANCE
5491
  _OP_PARAMS = [
5492
    _PInstanceName,
5493
    _PMigrationMode,
5494
    ("cleanup", False, _TBool),
5495
    ]
5496

    
5497
  REQ_BGL = False
5498

    
5499
  def ExpandNames(self):
5500
    self._ExpandAndLockInstance()
5501

    
5502
    self.needed_locks[locking.LEVEL_NODE] = []
5503
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5504

    
5505
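    # The prerequisite checks and the migration work itself are delegated to
    # the TLMigrateInstance tasklet defined further down in this module.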
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
5506
                                       self.op.cleanup)
5507
    self.tasklets = [self._migrater]
5508

    
5509
  def DeclareLocks(self, level):
5510
    if level == locking.LEVEL_NODE:
5511
      self._LockInstancesNodes()
5512

    
5513
  def BuildHooksEnv(self):
5514
    """Build hooks env.
5515

5516
    This runs on master, primary and secondary nodes of the instance.
5517

5518
    """
5519
    instance = self._migrater.instance
5520
    source_node = instance.primary_node
5521
    target_node = instance.secondary_nodes[0]
5522
    env = _BuildInstanceHookEnvByObject(self, instance)
5523
    env["MIGRATE_LIVE"] = self._migrater.live
5524
    env["MIGRATE_CLEANUP"] = self.op.cleanup
5525
    env.update({
5526
        "OLD_PRIMARY": source_node,
5527
        "OLD_SECONDARY": target_node,
5528
        "NEW_PRIMARY": target_node,
5529
        "NEW_SECONDARY": source_node,
5530
        })
5531
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5532
    nl_post = list(nl)
5533
    nl_post.append(source_node)
5534
    return env, nl, nl_post
5535

    
5536

    
5537
class LUMoveInstance(LogicalUnit):
5538
  """Move an instance by data-copying.
5539

5540
  """
5541
  HPATH = "instance-move"
5542
  HTYPE = constants.HTYPE_INSTANCE
5543
  _OP_PARAMS = [
5544
    _PInstanceName,
5545
    ("target_node", _NoDefault, _TNonEmptyString),
5546
    _PShutdownTimeout,
5547
    ]
5548
  REQ_BGL = False
5549

    
5550
  def ExpandNames(self):
5551
    self._ExpandAndLockInstance()
5552
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5553
    self.op.target_node = target_node
5554
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
5555
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5556

    
5557
  def DeclareLocks(self, level):
5558
    if level == locking.LEVEL_NODE:
5559
      self._LockInstancesNodes(primary_only=True)
5560

    
5561
  def BuildHooksEnv(self):
5562
    """Build hooks env.
5563

5564
    This runs on master, primary and secondary nodes of the instance.
5565

5566
    """
5567
    env = {
5568
      "TARGET_NODE": self.op.target_node,
5569
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5570
      }
5571
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5572
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
5573
                                       self.op.target_node]
5574
    return env, nl, nl
5575

    
5576
  def CheckPrereq(self):
5577
    """Check prerequisites.
5578

5579
    This checks that the instance is in the cluster.
5580

5581
    """
5582
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5583
    assert self.instance is not None, \
5584
      "Cannot retrieve locked instance %s" % self.op.instance_name
5585

    
5586
    node = self.cfg.GetNodeInfo(self.op.target_node)
5587
    assert node is not None, \
5588
      "Cannot retrieve locked node %s" % self.op.target_node
5589

    
5590
    self.target_node = target_node = node.name
5591

    
5592
    if target_node == instance.primary_node:
5593
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
5594
                                 (instance.name, target_node),
5595
                                 errors.ECODE_STATE)
5596

    
5597
    bep = self.cfg.GetClusterInfo().FillBE(instance)
5598

    
5599
    for idx, dsk in enumerate(instance.disks):
5600
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
5601
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
5602
                                   " cannot copy" % idx, errors.ECODE_STATE)
5603

    
5604
    _CheckNodeOnline(self, target_node)
5605
    _CheckNodeNotDrained(self, target_node)
5606

    
5607
    if instance.admin_up:
5608
      # check memory requirements on the target node
5609
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5610
                           instance.name, bep[constants.BE_MEMORY],
5611
                           instance.hypervisor)
5612
    else:
5613
      self.LogInfo("Not checking memory on the secondary node as"
5614
                   " instance will not be started")
5615

    
5616
    # check bridge existance
5617
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5618

    
5619
  def Exec(self, feedback_fn):
5620
    """Move an instance.
5621

5622
    The move is done by shutting it down on its present node, copying
5623
    the data over (slow) and starting it on the new node.
5624

5625
    """
5626
    instance = self.instance
5627

    
5628
    source_node = instance.primary_node
5629
    target_node = self.target_node
5630

    
5631
    self.LogInfo("Shutting down instance %s on source node %s",
5632
                 instance.name, source_node)
5633

    
5634
    result = self.rpc.call_instance_shutdown(source_node, instance,
5635
                                             self.op.shutdown_timeout)
5636
    msg = result.fail_msg
5637
    if msg:
5638
      if self.op.ignore_consistency:
5639
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
5640
                             " Proceeding anyway. Please make sure node"
5641
                             " %s is down. Error details: %s",
5642
                             instance.name, source_node, source_node, msg)
5643
      else:
5644
        raise errors.OpExecError("Could not shutdown instance %s on"
5645
                                 " node %s: %s" %
5646
                                 (instance.name, source_node, msg))
5647

    
5648
    # create the target disks
5649
    try:
5650
      _CreateDisks(self, instance, target_node=target_node)
5651
    except errors.OpExecError:
5652
      self.LogWarning("Device creation failed, reverting...")
5653
      try:
5654
        _RemoveDisks(self, instance, target_node=target_node)
5655
      finally:
5656
        self.cfg.ReleaseDRBDMinors(instance.name)
5657
        raise
5658

    
5659
    cluster_name = self.cfg.GetClusterInfo().cluster_name
5660

    
5661
    errs = []
5662
    # activate, get path, copy the data over
5663
    for idx, disk in enumerate(instance.disks):
5664
      self.LogInfo("Copying data for disk %d", idx)
5665
      result = self.rpc.call_blockdev_assemble(target_node, disk,
5666
                                               instance.name, True)
5667
      if result.fail_msg:
5668
        self.LogWarning("Can't assemble newly created disk %d: %s",
5669
                        idx, result.fail_msg)
5670
        errs.append(result.fail_msg)
5671
        break
5672
      dev_path = result.payload
5673
      result = self.rpc.call_blockdev_export(source_node, disk,
5674
                                             target_node, dev_path,
5675
                                             cluster_name)
5676
      if result.fail_msg:
5677
        self.LogWarning("Can't copy data over for disk %d: %s",
5678
                        idx, result.fail_msg)
5679
        errs.append(result.fail_msg)
5680
        break
5681

    
5682
    if errs:
5683
      self.LogWarning("Some disks failed to copy, aborting")
5684
      try:
5685
        _RemoveDisks(self, instance, target_node=target_node)
5686
      finally:
5687
        self.cfg.ReleaseDRBDMinors(instance.name)
5688
        raise errors.OpExecError("Errors during disk copy: %s" %
5689
                                 (",".join(errs),))
5690

    
5691
    instance.primary_node = target_node
5692
    self.cfg.Update(instance, feedback_fn)
5693

    
5694
    self.LogInfo("Removing the disks on the original node")
5695
    _RemoveDisks(self, instance, target_node=source_node)
5696

    
5697
    # Only start the instance if it's marked as up
5698
    if instance.admin_up:
5699
      self.LogInfo("Starting instance %s on node %s",
5700
                   instance.name, target_node)
5701

    
5702
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
5703
                                           ignore_secondaries=True)
5704
      if not disks_ok:
5705
        _ShutdownInstanceDisks(self, instance)
5706
        raise errors.OpExecError("Can't activate the instance's disks")
5707

    
5708
      result = self.rpc.call_instance_start(target_node, instance, None, None)
5709
      msg = result.fail_msg
5710
      if msg:
5711
        _ShutdownInstanceDisks(self, instance)
5712
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5713
                                 (instance.name, target_node, msg))
5714

    
5715

    
5716
class LUMigrateNode(LogicalUnit):
5717
  """Migrate all instances from a node.
5718

5719
  """
5720
  HPATH = "node-migrate"
5721
  HTYPE = constants.HTYPE_NODE
5722
  _OP_PARAMS = [
5723
    _PNodeName,
5724
    _PMigrationMode,
5725
    ]
5726
  REQ_BGL = False
5727

    
5728
  def ExpandNames(self):
5729
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5730

    
5731
    self.needed_locks = {
5732
      locking.LEVEL_NODE: [self.op.node_name],
5733
      }
5734

    
5735
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5736

    
5737
    # Create tasklets for migrating instances for all instances on this node
5738
    names = []
5739
    tasklets = []
5740

    
5741
    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
5742
      logging.debug("Migrating instance %s", inst.name)
5743
      names.append(inst.name)
5744

    
5745
      tasklets.append(TLMigrateInstance(self, inst.name, False))
5746

    
5747
    self.tasklets = tasklets
5748

    
5749
    # Declare instance locks
5750
    self.needed_locks[locking.LEVEL_INSTANCE] = names
5751

    
5752
  def DeclareLocks(self, level):
5753
    if level == locking.LEVEL_NODE:
5754
      self._LockInstancesNodes()
5755

    
5756
  def BuildHooksEnv(self):
5757
    """Build hooks env.
5758

5759
    This runs on the master node only.
5760

5761
    """
5762
    env = {
5763
      "NODE_NAME": self.op.node_name,
5764
      }
5765

    
5766
    nl = [self.cfg.GetMasterNode()]
5767

    
5768
    return (env, nl, nl)
5769

    
5770

    
5771
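# Note: LUMigrateNode does no migration work itself; ExpandNames only
# builds one TLMigrateInstance tasklet per primary instance of the node,
# so evacuating a node is simply N independent instance migrations driven
# by the generic tasklet machinery.
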
class TLMigrateInstance(Tasklet):
5772
  """Tasklet class for instance migration.
5773

5774
  @type live: boolean
5775
  @ivar live: whether the migration will be done live or non-live;
5776
      this variable is initialized only after CheckPrereq has run
5777

5778
  """
5779
  def __init__(self, lu, instance_name, cleanup):
5780
    """Initializes this class.
5781

5782
    """
5783
    Tasklet.__init__(self, lu)
5784

    
5785
    # Parameters
5786
    self.instance_name = instance_name
5787
    self.cleanup = cleanup
5788
    self.live = False # will be overridden later
5789

    
5790
  def CheckPrereq(self):
5791
    """Check prerequisites.
5792

5793
    This checks that the instance is in the cluster.
5794

5795
    """
5796
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
5797
    instance = self.cfg.GetInstanceInfo(instance_name)
5798
    assert instance is not None
5799

    
5800
    if instance.disk_template != constants.DT_DRBD8:
5801
      raise errors.OpPrereqError("Instance's disk layout is not"
5802
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)
5803

    
5804
    secondary_nodes = instance.secondary_nodes
5805
    if not secondary_nodes:
5806
      raise errors.ConfigurationError("No secondary node but using"
5807
                                      " drbd8 disk template")
5808

    
5809
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
5810

    
5811
    target_node = secondary_nodes[0]
5812
    # check memory requirements on the secondary node
5813
    _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
5814
                         instance.name, i_be[constants.BE_MEMORY],
5815
                         instance.hypervisor)
5816

    
5817
    # check bridge existance
5818
    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
5819

    
5820
    if not self.cleanup:
5821
      _CheckNodeNotDrained(self.lu, target_node)
5822
      result = self.rpc.call_instance_migratable(instance.primary_node,
5823
                                                 instance)
5824
      result.Raise("Can't migrate, please use failover",
5825
                   prereq=True, ecode=errors.ECODE_STATE)
5826

    
5827
    self.instance = instance
5828

    
5829
    if self.lu.op.mode is None:
5830
      # read the default value from the hypervisor
5831
      i_hv = self.cfg.GetClusterInfo().FillHV(instance, skip_globals=False)
5832
      self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
5833

    
5834
    self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
5835

    
5836
  def _WaitUntilSync(self):
5837
    """Poll with custom rpc for disk sync.
5838

5839
    This uses our own step-based rpc call.
5840

5841
    """
5842
    self.feedback_fn("* wait until resync is done")
5843
    all_done = False
5844
    while not all_done:
5845
      all_done = True
5846
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
5847
                                            self.nodes_ip,
5848
                                            self.instance.disks)
5849
      min_percent = 100
5850
      for node, nres in result.items():
5851
        nres.Raise("Cannot resync disks on node %s" % node)
5852
        node_done, node_percent = nres.payload
5853
        all_done = all_done and node_done
5854
        if node_percent is not None:
5855
          min_percent = min(min_percent, node_percent)
5856
      if not all_done:
5857
        if min_percent < 100:
5858
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
5859
        time.sleep(2)
5860

    
5861
  def _EnsureSecondary(self, node):
5862
    """Demote a node to secondary.
5863

5864
    """
5865
    self.feedback_fn("* switching node %s to secondary mode" % node)
5866

    
5867
    for dev in self.instance.disks:
5868
      self.cfg.SetDiskID(dev, node)
5869

    
5870
    result = self.rpc.call_blockdev_close(node, self.instance.name,
5871
                                          self.instance.disks)
5872
    result.Raise("Cannot change disk to secondary on node %s" % node)
5873

    
5874
  def _GoStandalone(self):
5875
    """Disconnect from the network.
5876

5877
    """
5878
    self.feedback_fn("* changing into standalone mode")
5879
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
5880
                                               self.instance.disks)
5881
    for node, nres in result.items():
5882
      nres.Raise("Cannot disconnect disks node %s" % node)
5883

    
5884
  def _GoReconnect(self, multimaster):
5885
    """Reconnect to the network.
5886

5887
    """
5888
    if multimaster:
5889
      msg = "dual-master"
5890
    else:
5891
      msg = "single-master"
5892
    self.feedback_fn("* changing disks into %s mode" % msg)
5893
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
5894
                                           self.instance.disks,
5895
                                           self.instance.name, multimaster)
5896
    for node, nres in result.items():
5897
      nres.Raise("Cannot change disks config on node %s" % node)
5898

    
5899
  def _ExecCleanup(self):
5900
    """Try to cleanup after a failed migration.
5901

5902
    The cleanup is done by:
5903
      - check that the instance is running only on one node
5904
        (and update the config if needed)
5905
      - change disks on its secondary node to secondary
5906
      - wait until disks are fully synchronized
5907
      - disconnect from the network
5908
      - change disks into single-master mode
5909
      - wait again until disks are fully synchronized
5910

5911
    """
5912
    instance = self.instance
5913
    target_node = self.target_node
5914
    source_node = self.source_node
5915

    
5916
    # check running on only one node
5917
    self.feedback_fn("* checking where the instance actually runs"
5918
                     " (if this hangs, the hypervisor might be in"
5919
                     " a bad state)")
5920
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
5921
    for node, result in ins_l.items():
5922
      result.Raise("Can't contact node %s" % node)
5923

    
5924
    runningon_source = instance.name in ins_l[source_node].payload
5925
    runningon_target = instance.name in ins_l[target_node].payload
5926

    
5927
    if runningon_source and runningon_target:
5928
      raise errors.OpExecError("Instance seems to be running on two nodes,"
5929
                               " or the hypervisor is confused. You will have"
5930
                               " to ensure manually that it runs only on one"
5931
                               " and restart this operation.")
5932

    
5933
    if not (runningon_source or runningon_target):
5934
      raise errors.OpExecError("Instance does not seem to be running at all."
5935
                               " In this case, it's safer to repair by"
5936
                               " running 'gnt-instance stop' to ensure disk"
5937
                               " shutdown, and then restarting it.")
5938

    
5939
    if runningon_target:
5940
      # the migration has actually succeeded, we need to update the config
5941
      self.feedback_fn("* instance running on secondary node (%s),"
5942
                       " updating config" % target_node)
5943
      instance.primary_node = target_node
5944
      self.cfg.Update(instance, self.feedback_fn)
5945
      demoted_node = source_node
5946
    else:
5947
      self.feedback_fn("* instance confirmed to be running on its"
5948
                       " primary node (%s)" % source_node)
5949
      demoted_node = target_node
5950

    
5951
    self._EnsureSecondary(demoted_node)
5952
    try:
5953
      self._WaitUntilSync()
5954
    except errors.OpExecError:
5955
      # we ignore here errors, since if the device is standalone, it
5956
      # won't be able to sync
5957
      pass
5958
    self._GoStandalone()
5959
    self._GoReconnect(False)
5960
    self._WaitUntilSync()
5961

    
5962
    self.feedback_fn("* done")
5963

    
5964
  def _RevertDiskStatus(self):
5965
    """Try to revert the disk status after a failed migration.
5966

5967
    """
5968
    target_node = self.target_node
5969
    try:
5970
      self._EnsureSecondary(target_node)
5971
      self._GoStandalone()
5972
      self._GoReconnect(False)
5973
      self._WaitUntilSync()
5974
    except errors.OpExecError, err:
5975
      self.lu.LogWarning("Migration failed and I can't reconnect the"
5976
                         " drives: error '%s'\n"
5977
                         "Please look and recover the instance status" %
5978
                         str(err))
5979

    
5980
  def _AbortMigration(self):
5981
    """Call the hypervisor code to abort a started migration.
5982

5983
    """
5984
    instance = self.instance
5985
    target_node = self.target_node
5986
    migration_info = self.migration_info
5987

    
5988
    abort_result = self.rpc.call_finalize_migration(target_node,
5989
                                                    instance,
5990
                                                    migration_info,
5991
                                                    False)
5992
    abort_msg = abort_result.fail_msg
5993
    if abort_msg:
5994
      logging.error("Aborting migration failed on target node %s: %s",
5995
                    target_node, abort_msg)
5996
      # Don't raise an exception here, as we still have to try to revert the
5997
      # disk status, even if this step failed.
5998

    
5999
  def _ExecMigration(self):
6000
    """Migrate an instance.
6001

6002
    The migrate is done by:
6003
      - change the disks into dual-master mode
6004
      - wait until disks are fully synchronized again
6005
      - migrate the instance
6006
      - change disks on the new secondary node (the old primary) to secondary
6007
      - wait until disks are fully synchronized
6008
      - change disks into single-master mode
6009

6010
    """
6011
    instance = self.instance
6012
    target_node = self.target_node
6013
    source_node = self.source_node
6014

    
6015
    self.feedback_fn("* checking disk consistency between source and target")
6016
    for dev in instance.disks:
6017
      if not _CheckDiskConsistency(self.lu, dev, target_node, False):
6018
        raise errors.OpExecError("Disk %s is degraded or not fully"
6019
                                 " synchronized on target node,"
6020
                                 " aborting migrate." % dev.iv_name)
6021

    
6022
    # First get the migration information from the remote node
6023
    result = self.rpc.call_migration_info(source_node, instance)
6024
    msg = result.fail_msg
6025
    if msg:
6026
      log_err = ("Failed fetching source migration information from %s: %s" %
6027
                 (source_node, msg))
6028
      logging.error(log_err)
6029
      raise errors.OpExecError(log_err)
6030

    
6031
    self.migration_info = migration_info = result.payload
6032

    
6033
    # Then switch the disks to master/master mode
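    # (during a live migration the instance may be running on either node
    # while its memory is transferred, so both sides temporarily need write
    # access to the disks; they are demoted back to single-master below)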
6034
    self._EnsureSecondary(target_node)
6035
    self._GoStandalone()
6036
    self._GoReconnect(True)
6037
    self._WaitUntilSync()
6038

    
6039
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
6040
    result = self.rpc.call_accept_instance(target_node,
6041
                                           instance,
6042
                                           migration_info,
6043
                                           self.nodes_ip[target_node])
6044

    
6045
    msg = result.fail_msg
6046
    if msg:
6047
      logging.error("Instance pre-migration failed, trying to revert"
6048
                    " disk status: %s", msg)
6049
      self.feedback_fn("Pre-migration failed, aborting")
6050
      self._AbortMigration()
6051
      self._RevertDiskStatus()
6052
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
6053
                               (instance.name, msg))
6054

    
6055
    self.feedback_fn("* migrating instance to %s" % target_node)
6056
    time.sleep(10)
6057
    result = self.rpc.call_instance_migrate(source_node, instance,
6058
                                            self.nodes_ip[target_node],
6059
                                            self.live)
6060
    msg = result.fail_msg
6061
    if msg:
6062
      logging.error("Instance migration failed, trying to revert"
6063
                    " disk status: %s", msg)
6064
      self.feedback_fn("Migration failed, aborting")
6065
      self._AbortMigration()
6066
      self._RevertDiskStatus()
6067
      raise errors.OpExecError("Could not migrate instance %s: %s" %
6068
                               (instance.name, msg))
6069
    time.sleep(10)
6070

    
6071
    instance.primary_node = target_node
6072
    # distribute new instance config to the other nodes
6073
    self.cfg.Update(instance, self.feedback_fn)
6074

    
6075
    result = self.rpc.call_finalize_migration(target_node,
6076
                                              instance,
6077
                                              migration_info,
6078
                                              True)
6079
    msg = result.fail_msg
6080
    if msg:
6081
      logging.error("Instance migration succeeded, but finalization failed:"
6082
                    " %s", msg)
6083
      raise errors.OpExecError("Could not finalize instance migration: %s" %
6084
                               msg)
6085

    
6086
    self._EnsureSecondary(source_node)
6087
    self._WaitUntilSync()
6088
    self._GoStandalone()
6089
    self._GoReconnect(False)
6090
    self._WaitUntilSync()
6091

    
6092
    self.feedback_fn("* done")
6093

    
6094
  def Exec(self, feedback_fn):
6095
    """Perform the migration.
6096

6097
    """
6098
    feedback_fn("Migrating instance %s" % self.instance.name)
6099

    
6100
    self.feedback_fn = feedback_fn
6101

    
6102
    self.source_node = self.instance.primary_node
6103
    self.target_node = self.instance.secondary_nodes[0]
6104
    self.all_nodes = [self.source_node, self.target_node]
6105
    self.nodes_ip = {
6106
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
6107
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
6108
      }
6109

    
6110
    if self.cleanup:
6111
      return self._ExecCleanup()
6112
    else:
6113
      return self._ExecMigration()
6114

    
6115

    
6116
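# The polling loop in TLMigrateInstance._WaitUntilSync above reduces the
# per-node (done, percent) payloads of the drbd_wait_sync RPC to a single
# "all done" flag plus the minimum progress percentage.  The helper below
# is an illustrative, self-contained sketch of that reduction only; it is
# not called anywhere in this module.
def _AggregateSyncStatusExample(node_results):
  """Illustrative sketch of the disk sync aggregation.

  @type node_results: dict
  @param node_results: maps a node name to a (done, percent) pair; percent
      may be None when a node cannot report progress
  @rtype: tuple
  @return: (all_done, min_percent) over all nodes

  """
  all_done = True
  min_percent = 100
  for _, (node_done, node_percent) in node_results.items():
    all_done = all_done and node_done
    if node_percent is not None:
      min_percent = min(min_percent, node_percent)
  return (all_done, min_percent)
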
def _CreateBlockDev(lu, node, instance, device, force_create,
6117
                    info, force_open):
6118
  """Create a tree of block devices on a given node.
6119

6120
  If this device type has to be created on secondaries, create it and
6121
  all its children.
6122

6123
  If not, just recurse to children keeping the same 'force' value.
6124

6125
  @param lu: the lu on whose behalf we execute
6126
  @param node: the node on which to create the device
6127
  @type instance: L{objects.Instance}
6128
  @param instance: the instance which owns the device
6129
  @type device: L{objects.Disk}
6130
  @param device: the device to create
6131
  @type force_create: boolean
6132
  @param force_create: whether to force creation of this device; this
6133
      will be changed to True whenever we find a device for which
6134
      CreateOnSecondary() returns True
6135
  @param info: the extra 'metadata' we should attach to the device
6136
      (this will be represented as a LVM tag)
6137
  @type force_open: boolean
6138
  @param force_open: this parameter will be passed to the
6139
      L{backend.BlockdevCreate} function where it specifies
6140
      whether we run on primary or not, and it affects both
6141
      the child assembly and the device's own Open() execution
6142

6143
  """
6144
  if device.CreateOnSecondary():
6145
    force_create = True
6146

    
6147
  if device.children:
6148
    for child in device.children:
6149
      _CreateBlockDev(lu, node, instance, child, force_create,
6150
                      info, force_open)
6151

    
6152
  if not force_create:
6153
    return
6154

    
6155
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
6156

    
6157

    
6158
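# The recursion in _CreateBlockDev above creates children bottom-up and,
# once a device reports CreateOnSecondary(), forces creation of its whole
# subtree.  The function below is an illustrative, self-contained sketch of
# that ordering on a toy device tree (plain tuples instead of
# L{objects.Disk}); it is not called anywhere in this module.
def _CreateBlockDevOrderExample():
  """Returns the creation order for a drbd-like toy device tree.

  """
  order = []

  def _walk(dev, force):
    (name, on_secondary, children) = dev
    force = force or on_secondary
    for child in children:
      _walk(child, force)
    if force:
      # children are always recorded before their parent device
      order.append(name)

  # drbd8-like layout: the parent forces creation of its two LV children
  tree = ("drbd0", True, [("data_lv", False, []), ("meta_lv", False, [])])
  _walk(tree, False)
  return order # ['data_lv', 'meta_lv', 'drbd0']
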
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
6159
  """Create a single block device on a given node.
6160

6161
  This will not recurse over children of the device, so they must be
6162
  created in advance.
6163

6164
  @param lu: the lu on whose behalf we execute
6165
  @param node: the node on which to create the device
6166
  @type instance: L{objects.Instance}
6167
  @param instance: the instance which owns the device
6168
  @type device: L{objects.Disk}
6169
  @param device: the device to create
6170
  @param info: the extra 'metadata' we should attach to the device
6171
      (this will be represented as a LVM tag)
6172
  @type force_open: boolean
6173
  @param force_open: this parameter will be passed to the
6174
      L{backend.BlockdevCreate} function where it specifies
6175
      whether we run on primary or not, and it affects both
6176
      the child assembly and the device's own Open() execution
6177

6178
  """
6179
  lu.cfg.SetDiskID(device, node)
6180
  result = lu.rpc.call_blockdev_create(node, device, device.size,
6181
                                       instance.name, force_open, info)
6182
  result.Raise("Can't create block device %s on"
6183
               " node %s for instance %s" % (device, node, instance.name))
6184
  if device.physical_id is None:
6185
    device.physical_id = result.payload
6186

    
6187

    
6188
def _GenerateUniqueNames(lu, exts):
6189
  """Generate a suitable LV name.
6190

6191
  This will generate a logical volume name for the given instance.
6192

6193
  """
6194
  results = []
6195
  for val in exts:
6196
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
6197
    results.append("%s%s" % (new_id, val))
6198
  return results
6199

    
6200

    
6201
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
6202
                         p_minor, s_minor):
6203
  """Generate a drbd8 device complete with its children.
6204

6205
  """
6206
  port = lu.cfg.AllocatePort()
6207
  vgname = lu.cfg.GetVGName()
6208
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
6209
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
6210
                          logical_id=(vgname, names[0]))
6211
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
6212
                          logical_id=(vgname, names[1]))
6213
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
6214
                          logical_id=(primary, secondary, port,
6215
                                      p_minor, s_minor,
6216
                                      shared_secret),
6217
                          children=[dev_data, dev_meta],
6218
                          iv_name=iv_name)
6219
  return drbd_dev
6220

    
6221

    
6222
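# Schematically, the device returned by _GenerateDRBD8Branch above looks
# like this (illustration only):
#
#   Disk(LD_DRBD8, size=size,
#        logical_id=(primary, secondary, port, p_minor, s_minor, secret),
#        children=[Disk(LD_LV, size=size, logical_id=(vgname, names[0])),
#                  Disk(LD_LV, size=128,  logical_id=(vgname, names[1]))])
#
# i.e. every drbd8 disk consists of a data LV of the requested size plus a
# fixed 128 MB metadata LV, both allocated on each of the two nodes.
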
def _GenerateDiskTemplate(lu, template_name,
6223
                          instance_name, primary_node,
6224
                          secondary_nodes, disk_info,
6225
                          file_storage_dir, file_driver,
6226
                          base_index):
6227
  """Generate the entire disk layout for a given template type.
6228

6229
  """
6230
  #TODO: compute space requirements
6231

    
6232
  vgname = lu.cfg.GetVGName()
6233
  disk_count = len(disk_info)
6234
  disks = []
6235
  if template_name == constants.DT_DISKLESS:
6236
    pass
6237
  elif template_name == constants.DT_PLAIN:
6238
    if len(secondary_nodes) != 0:
6239
      raise errors.ProgrammerError("Wrong template configuration")
6240

    
6241
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6242
                                      for i in range(disk_count)])
6243
    for idx, disk in enumerate(disk_info):
6244
      disk_index = idx + base_index
6245
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
6246
                              logical_id=(vgname, names[idx]),
6247
                              iv_name="disk/%d" % disk_index,
6248
                              mode=disk["mode"])
6249
      disks.append(disk_dev)
6250
  elif template_name == constants.DT_DRBD8:
6251
    if len(secondary_nodes) != 1:
6252
      raise errors.ProgrammerError("Wrong template configuration")
6253
    remote_node = secondary_nodes[0]
6254
    minors = lu.cfg.AllocateDRBDMinor(
6255
      [primary_node, remote_node] * len(disk_info), instance_name)
6256

    
6257
    names = []
6258
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
6259
                                               for i in range(disk_count)]):
6260
      names.append(lv_prefix + "_data")
6261
      names.append(lv_prefix + "_meta")
6262
    for idx, disk in enumerate(disk_info):
6263
      disk_index = idx + base_index
6264
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
6265
                                      disk["size"], names[idx*2:idx*2+2],
6266
                                      "disk/%d" % disk_index,
6267
                                      minors[idx*2], minors[idx*2+1])
6268
      disk_dev.mode = disk["mode"]
6269
      disks.append(disk_dev)
6270
  elif template_name == constants.DT_FILE:
6271
    if len(secondary_nodes) != 0:
6272
      raise errors.ProgrammerError("Wrong template configuration")
6273

    
6274
    _RequireFileStorage()
6275

    
6276
    for idx, disk in enumerate(disk_info):
6277
      disk_index = idx + base_index
6278
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
6279
                              iv_name="disk/%d" % disk_index,
6280
                              logical_id=(file_driver,
6281
                                          "%s/disk%d" % (file_storage_dir,
6282
                                                         disk_index)),
6283
                              mode=disk["mode"])
6284
      disks.append(disk_dev)
6285
  else:
6286
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
6287
  return disks
6288

    
6289

    
6290
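# Illustrative example: for the plain template with
#   disk_info = [{"size": 1024, "mode": "rw"}] and base_index = 0
# the function above returns a single LD_LV disk, named "<uuid>.disk0" in
# the cluster's volume group and exported to the instance as "disk/0";
# under drbd8 the same entry instead becomes the three-device tree built by
# _GenerateDRBD8Branch, and under the file template a file named
# "<file_storage_dir>/disk0".
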
def _GetInstanceInfoText(instance):
6291
  """Compute that text that should be added to the disk's metadata.
6292

6293
  """
6294
  return "originstname+%s" % instance.name
6295

    
6296

    
6297
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
6298
  """Create all disks for an instance.
6299

6300
  This abstracts away some work from AddInstance.
6301

6302
  @type lu: L{LogicalUnit}
6303
  @param lu: the logical unit on whose behalf we execute
6304
  @type instance: L{objects.Instance}
6305
  @param instance: the instance whose disks we should create
6306
  @type to_skip: list
6307
  @param to_skip: list of indices to skip
6308
  @type target_node: string
6309
  @param target_node: if passed, overrides the target node for creation
6310
  @rtype: boolean
6311
  @return: the success of the creation
6312

6313
  """
6314
  info = _GetInstanceInfoText(instance)
6315
  if target_node is None:
6316
    pnode = instance.primary_node
6317
    all_nodes = instance.all_nodes
6318
  else:
6319
    pnode = target_node
6320
    all_nodes = [pnode]
6321

    
6322
  if instance.disk_template == constants.DT_FILE:
6323
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6324
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
6325

    
6326
    result.Raise("Failed to create directory '%s' on"
6327
                 " node %s" % (file_storage_dir, pnode))
6328

    
6329
  # Note: this needs to be kept in sync with adding of disks in
6330
  # LUSetInstanceParams
6331
  for idx, device in enumerate(instance.disks):
6332
    if to_skip and idx in to_skip:
6333
      continue
6334
    logging.info("Creating volume %s for instance %s",
6335
                 device.iv_name, instance.name)
6336
    #HARDCODE
6337
    for node in all_nodes:
6338
      f_create = node == pnode
6339
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
6340

    
6341

    
6342
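# Note: disk creation and removal are intentionally asymmetric: creation
# (above) raises on the first failing device so the caller can roll back,
# while removal (below) keeps going over all devices and only reports
# overall success, so a partially failed removal still cleans up as much
# as possible.
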
def _RemoveDisks(lu, instance, target_node=None):
6343
  """Remove all disks for an instance.
6344

6345
  This abstracts away some work from `AddInstance()` and
6346
  `RemoveInstance()`. Note that in case some of the devices couldn't
6347
  be removed, the removal will continue with the other ones (compare
6348
  with `_CreateDisks()`).
6349

6350
  @type lu: L{LogicalUnit}
6351
  @param lu: the logical unit on whose behalf we execute
6352
  @type instance: L{objects.Instance}
6353
  @param instance: the instance whose disks we should remove
6354
  @type target_node: string
6355
  @param target_node: used to override the node on which to remove the disks
6356
  @rtype: boolean
6357
  @return: the success of the removal
6358

6359
  """
6360
  logging.info("Removing block devices for instance %s", instance.name)
6361

    
6362
  all_result = True
6363
  for device in instance.disks:
6364
    if target_node:
6365
      edata = [(target_node, device)]
6366
    else:
6367
      edata = device.ComputeNodeTree(instance.primary_node)
6368
    for node, disk in edata:
6369
      lu.cfg.SetDiskID(disk, node)
6370
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
6371
      if msg:
6372
        lu.LogWarning("Could not remove block device %s on node %s,"
6373
                      " continuing anyway: %s", device.iv_name, node, msg)
6374
        all_result = False
6375

    
6376
  if instance.disk_template == constants.DT_FILE:
6377
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
6378
    if target_node:
6379
      tgt = target_node
6380
    else:
6381
      tgt = instance.primary_node
6382
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
6383
    if result.fail_msg:
6384
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
6385
                    file_storage_dir, instance.primary_node, result.fail_msg)
6386
      all_result = False
6387

    
6388
  return all_result
6389

    
6390

    
6391
def _ComputeDiskSize(disk_template, disks):
6392
  """Compute disk size requirements in the volume group
6393

6394
  """
6395
  # Required free disk space as a function of the disk template and disk sizes
6396
  req_size_dict = {
6397
    constants.DT_DISKLESS: None,
6398
    constants.DT_PLAIN: sum(d["size"] for d in disks),
6399
    # 128 MB are added for drbd metadata for each disk
6400
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
6401
    constants.DT_FILE: None,
6402
  }
6403

    
6404
  if disk_template not in req_size_dict:
6405
    raise errors.ProgrammerError("Disk template '%s' size requirement"
6406
                                 " is unknown" %  disk_template)
6407

    
6408
  return req_size_dict[disk_template]
6409

    
6410

    
6411
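# Worked example (illustration only): for two disks of 1024 and 2048 MB,
# _ComputeDiskSize returns 1024 + 2048 = 3072 MB for the plain template and
# (1024 + 128) + (2048 + 128) = 3328 MB for drbd8 (128 MB of metadata per
# disk), while the diskless and file templates need no volume group space
# at all (None).
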
def _CheckHVParams(lu, nodenames, hvname, hvparams):
6412
  """Hypervisor parameter validation.
6413

6414
  This function abstracts the hypervisor parameter validation to be
6415
  used in both instance create and instance modify.
6416

6417
  @type lu: L{LogicalUnit}
6418
  @param lu: the logical unit for which we check
6419
  @type nodenames: list
6420
  @param nodenames: the list of nodes on which we should check
6421
  @type hvname: string
6422
  @param hvname: the name of the hypervisor we should use
6423
  @type hvparams: dict
6424
  @param hvparams: the parameters which we need to check
6425
  @raise errors.OpPrereqError: if the parameters are not valid
6426

6427
  """
6428
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
6429
                                                  hvname,
6430
                                                  hvparams)
6431
  for node in nodenames:
6432
    info = hvinfo[node]
6433
    if info.offline:
6434
      continue
6435
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
6436

    
6437

    
6438
def _CheckOSParams(lu, required, nodenames, osname, osparams):
6439
  """OS parameters validation.
6440

6441
  @type lu: L{LogicalUnit}
6442
  @param lu: the logical unit for which we check
6443
  @type required: boolean
6444
  @param required: whether the validation should fail if the OS is not
6445
      found
6446
  @type nodenames: list
6447
  @param nodenames: the list of nodes on which we should check
6448
  @type osname: string
6449
  @param osname: the name of the OS we should use
6450
  @type osparams: dict
6451
  @param osparams: the parameters which we need to check
6452
  @raise errors.OpPrereqError: if the parameters are not valid
6453

6454
  """
6455
  result = lu.rpc.call_os_validate(required, nodenames, osname,
6456
                                   [constants.OS_VALIDATE_PARAMETERS],
6457
                                   osparams)
6458
  for node, nres in result.items():
6459
    # we don't check for offline cases since this should be run only
6460
    # against the master node and/or an instance's nodes
6461
    nres.Raise("OS Parameters validation failed on node %s" % node)
6462
    if not nres.payload:
6463
      lu.LogInfo("OS %s not found on node %s, validation skipped",
6464
                 osname, node)
6465

    
6466

    
6467
class LUCreateInstance(LogicalUnit):
6468
  """Create an instance.
6469

6470
  """
6471
  HPATH = "instance-add"
6472
  HTYPE = constants.HTYPE_INSTANCE
6473
  _OP_PARAMS = [
6474
    _PInstanceName,
6475
    ("mode", _NoDefault, _TElemOf(constants.INSTANCE_CREATE_MODES)),
6476
    ("start", True, _TBool),
6477
    ("wait_for_sync", True, _TBool),
6478
    ("ip_check", True, _TBool),
6479
    ("name_check", True, _TBool),
6480
    ("disks", _NoDefault, _TListOf(_TDict)),
6481
    ("nics", _NoDefault, _TListOf(_TDict)),
6482
    ("hvparams", _EmptyDict, _TDict),
6483
    ("beparams", _EmptyDict, _TDict),
6484
    ("osparams", _EmptyDict, _TDict),
6485
    ("no_install", None, _TMaybeBool),
6486
    ("os_type", None, _TMaybeString),
6487
    ("force_variant", False, _TBool),
6488
    ("source_handshake", None, _TOr(_TList, _TNone)),
6489
    ("source_x509_ca", None, _TOr(_TList, _TNone)),
6490
    ("source_instance_name", None, _TMaybeString),
6491
    ("src_node", None, _TMaybeString),
6492
    ("src_path", None, _TMaybeString),
6493
    ("pnode", None, _TMaybeString),
6494
    ("snode", None, _TMaybeString),
6495
    ("iallocator", None, _TMaybeString),
6496
    ("hypervisor", None, _TMaybeString),
6497
    ("disk_template", _NoDefault, _CheckDiskTemplate),
6498
    ("identify_defaults", False, _TBool),
6499
    ("file_driver", None, _TOr(_TNone, _TElemOf(constants.FILE_DRIVER))),
6500
    ("file_storage_dir", None, _TMaybeString),
6501
    ("dry_run", False, _TBool),
6502
    ]
6503
  REQ_BGL = False
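
  # Illustrative example only: a minimal parameter set accepted by the
  # declaration above for a plain-LVM instance would look roughly like
  #   {"instance_name": "inst1.example.com", "mode": "create",
  #    "os_type": "debian-image", "disk_template": "plain",
  #    "disks": [{"size": 1024}], "nics": [{}], "pnode": "node1"}
  # with every omitted parameter falling back to the defaults in _OP_PARAMS.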
6504

    
6505
  def CheckArguments(self):
6506
    """Check arguments.
6507

6508
    """
6509
    # do not require name_check to ease forward/backward compatibility
6510
    # for tools
6511
    if self.op.no_install and self.op.start:
6512
      self.LogInfo("No-installation mode selected, disabling startup")
6513
      self.op.start = False
6514
    # validate/normalize the instance name
6515
    self.op.instance_name = \
6516
      netutils.HostInfo.NormalizeName(self.op.instance_name)
6517

    
6518
    if self.op.ip_check and not self.op.name_check:
6519
      # TODO: make the ip check more flexible and not depend on the name check
6520
      raise errors.OpPrereqError("Cannot do ip checks without a name check",
6521
                                 errors.ECODE_INVAL)
6522

    
6523
    # check nics' parameter names
6524
    for nic in self.op.nics:
6525
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
6526

    
6527
    # check disks. parameter names and consistent adopt/no-adopt strategy
6528
    has_adopt = has_no_adopt = False
6529
    for disk in self.op.disks:
6530
      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
6531
      if "adopt" in disk:
6532
        has_adopt = True
6533
      else:
6534
        has_no_adopt = True
6535
    if has_adopt and has_no_adopt:
6536
      raise errors.OpPrereqError("Either all disks are adopted or none is",
6537
                                 errors.ECODE_INVAL)
6538
    if has_adopt:
6539
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
6540
        raise errors.OpPrereqError("Disk adoption is not supported for the"
6541
                                   " '%s' disk template" %
6542
                                   self.op.disk_template,
6543
                                   errors.ECODE_INVAL)
6544
      if self.op.iallocator is not None:
6545
        raise errors.OpPrereqError("Disk adoption not allowed with an"
6546
                                   " iallocator script", errors.ECODE_INVAL)
6547
      if self.op.mode == constants.INSTANCE_IMPORT:
6548
        raise errors.OpPrereqError("Disk adoption not allowed for"
6549
                                   " instance import", errors.ECODE_INVAL)
6550

    
6551
    self.adopt_disks = has_adopt
6552

    
6553
    # instance name verification
6554
    if self.op.name_check:
6555
      self.hostname1 = netutils.GetHostInfo(self.op.instance_name)
6556
      self.op.instance_name = self.hostname1.name
6557
      # used in CheckPrereq for ip ping check
6558
      self.check_ip = self.hostname1.ip
6559
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
6560
      raise errors.OpPrereqError("Remote imports require names to be checked" %
6561
                                 errors.ECODE_INVAL)
6562
    else:
6563
      self.check_ip = None
6564

    
6565
    # file storage checks
6566
    if (self.op.file_driver and
6567
        not self.op.file_driver in constants.FILE_DRIVER):
6568
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
6569
                                 self.op.file_driver, errors.ECODE_INVAL)
6570

    
6571
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
6572
      raise errors.OpPrereqError("File storage directory path not absolute",
6573
                                 errors.ECODE_INVAL)
6574

    
6575
    ### Node/iallocator related checks
6576
    _CheckIAllocatorOrNode(self, "iallocator", "pnode")
6577

    
6578
    self._cds = _GetClusterDomainSecret()
6579

    
6580
    if self.op.mode == constants.INSTANCE_IMPORT:
6581
      # On import force_variant must be True, because if we forced it at
6582
      # initial install, our only chance when importing it back is that it
6583
      # works again!
6584
      self.op.force_variant = True
6585

    
6586
      if self.op.no_install:
6587
        self.LogInfo("No-installation mode has no effect during import")
6588

    
6589
    elif self.op.mode == constants.INSTANCE_CREATE:
6590
      if self.op.os_type is None:
6591
        raise errors.OpPrereqError("No guest OS specified",
6592
                                   errors.ECODE_INVAL)
6593
      if self.op.disk_template is None:
6594
        raise errors.OpPrereqError("No disk template specified",
6595
                                   errors.ECODE_INVAL)
6596

    
6597
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
6598
      # Check handshake to ensure both clusters have the same domain secret
6599
      src_handshake = self.op.source_handshake
6600
      if not src_handshake:
6601
        raise errors.OpPrereqError("Missing source handshake",
6602
                                   errors.ECODE_INVAL)
6603

    
6604
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
6605
                                                           src_handshake)
6606
      if errmsg:
6607
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
6608
                                   errors.ECODE_INVAL)
6609

    
6610
      # Load and check source CA
6611
      self.source_x509_ca_pem = self.op.source_x509_ca
6612
      if not self.source_x509_ca_pem:
6613
        raise errors.OpPrereqError("Missing source X509 CA",
6614
                                   errors.ECODE_INVAL)
6615

    
6616
      try:
6617
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
6618
                                                    self._cds)
6619
      except OpenSSL.crypto.Error, err:
6620
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
6621
                                   (err, ), errors.ECODE_INVAL)
6622

    
6623
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
6624
      if errcode is not None:
6625
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
6626
                                   errors.ECODE_INVAL)
6627

    
6628
      self.source_x509_ca = cert
6629

    
6630
      src_instance_name = self.op.source_instance_name
6631
      if not src_instance_name:
6632
        raise errors.OpPrereqError("Missing source instance name",
6633
                                   errors.ECODE_INVAL)
6634

    
6635
      norm_name = netutils.HostInfo.NormalizeName(src_instance_name)
6636
      self.source_instance_name = netutils.GetHostInfo(norm_name).name
6637

    
6638
    else:
6639
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
6640
                                 self.op.mode, errors.ECODE_INVAL)
6641

    
6642
  def ExpandNames(self):
6643
    """ExpandNames for CreateInstance.
6644

6645
    Figure out the right locks for instance creation.
6646

6647
    """
6648
    self.needed_locks = {}
6649

    
6650
    instance_name = self.op.instance_name
6651
    # this is just a preventive check, but someone might still add this
6652
    # instance in the meantime, and creation will fail at lock-add time
6653
    if instance_name in self.cfg.GetInstanceList():
6654
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
6655
                                 instance_name, errors.ECODE_EXISTS)
6656

    
6657
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
6658

    
6659
    if self.op.iallocator:
6660
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6661
    else:
6662
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
6663
      nodelist = [self.op.pnode]
6664
      if self.op.snode is not None:
6665
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
6666
        nodelist.append(self.op.snode)
6667
      self.needed_locks[locking.LEVEL_NODE] = nodelist
6668

    
6669
    # in case of import lock the source node too
6670
    if self.op.mode == constants.INSTANCE_IMPORT:
6671
      src_node = self.op.src_node
6672
      src_path = self.op.src_path
6673

    
6674
      if src_path is None:
6675
        self.op.src_path = src_path = self.op.instance_name
6676

    
6677
      if src_node is None:
6678
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6679
        self.op.src_node = None
6680
        if os.path.isabs(src_path):
6681
          raise errors.OpPrereqError("Importing an instance from an absolute"
6682
                                     " path requires a source node option.",
6683
                                     errors.ECODE_INVAL)
6684
      else:
6685
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
6686
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
6687
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
6688
        if not os.path.isabs(src_path):
6689
          self.op.src_path = src_path = \
6690
            utils.PathJoin(constants.EXPORT_DIR, src_path)
6691

    
6692
  def _RunAllocator(self):
6693
    """Run the allocator based on input opcode.
6694

6695
    """
6696
    nics = [n.ToDict() for n in self.nics]
6697
    ial = IAllocator(self.cfg, self.rpc,
6698
                     mode=constants.IALLOCATOR_MODE_ALLOC,
6699
                     name=self.op.instance_name,
6700
                     disk_template=self.op.disk_template,
6701
                     tags=[],
6702
                     os=self.op.os_type,
6703
                     vcpus=self.be_full[constants.BE_VCPUS],
6704
                     mem_size=self.be_full[constants.BE_MEMORY],
6705
                     disks=self.disks,
6706
                     nics=nics,
6707
                     hypervisor=self.op.hypervisor,
6708
                     )
6709

    
6710
    ial.Run(self.op.iallocator)
6711

    
6712
    if not ial.success:
6713
      raise errors.OpPrereqError("Can't compute nodes using"
6714
                                 " iallocator '%s': %s" %
6715
                                 (self.op.iallocator, ial.info),
6716
                                 errors.ECODE_NORES)
6717
    if len(ial.result) != ial.required_nodes:
6718
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6719
                                 " of nodes (%s), required %s" %
6720
                                 (self.op.iallocator, len(ial.result),
6721
                                  ial.required_nodes), errors.ECODE_FAULT)
6722
    self.op.pnode = ial.result[0]
6723
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
6724
                 self.op.instance_name, self.op.iallocator,
6725
                 utils.CommaJoin(ial.result))
6726
    if ial.required_nodes == 2:
6727
      self.op.snode = ial.result[1]
6728

    
6729
  def BuildHooksEnv(self):
6730
    """Build hooks env.
6731

6732
    This runs on master, primary and secondary nodes of the instance.
6733

6734
    """
6735
    env = {
6736
      "ADD_MODE": self.op.mode,
6737
      }
6738
    if self.op.mode == constants.INSTANCE_IMPORT:
6739
      env["SRC_NODE"] = self.op.src_node
6740
      env["SRC_PATH"] = self.op.src_path
6741
      env["SRC_IMAGES"] = self.src_images
6742

    
6743
    env.update(_BuildInstanceHookEnv(
6744
      name=self.op.instance_name,
6745
      primary_node=self.op.pnode,
6746
      secondary_nodes=self.secondaries,
6747
      status=self.op.start,
6748
      os_type=self.op.os_type,
6749
      memory=self.be_full[constants.BE_MEMORY],
6750
      vcpus=self.be_full[constants.BE_VCPUS],
6751
      nics=_NICListToTuple(self, self.nics),
6752
      disk_template=self.op.disk_template,
6753
      disks=[(d["size"], d["mode"]) for d in self.disks],
6754
      bep=self.be_full,
6755
      hvp=self.hv_full,
6756
      hypervisor_name=self.op.hypervisor,
6757
    ))
6758

    
6759
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
6760
          self.secondaries)
6761
    return env, nl, nl
6762

    
6763
  def _ReadExportInfo(self):
6764
    """Reads the export information from disk.
6765

6766
    It will override the opcode source node and path with the actual
6767
    information, if these two were not specified before.
6768

6769
    @return: the export information
6770

6771
    """
6772
    assert self.op.mode == constants.INSTANCE_IMPORT
6773

    
6774
    src_node = self.op.src_node
6775
    src_path = self.op.src_path
6776

    
6777
    if src_node is None:
6778
      locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
6779
      exp_list = self.rpc.call_export_list(locked_nodes)
6780
      found = False
6781
      for node in exp_list:
6782
        if exp_list[node].fail_msg:
6783
          continue
6784
        if src_path in exp_list[node].payload:
6785
          found = True
6786
          self.op.src_node = src_node = node
6787
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
6788
                                                       src_path)
6789
          break
6790
      if not found:
6791
        raise errors.OpPrereqError("No export found for relative path %s" %
6792
                                    src_path, errors.ECODE_INVAL)
6793

    
6794
    _CheckNodeOnline(self, src_node)
6795
    result = self.rpc.call_export_info(src_node, src_path)
6796
    result.Raise("No export or invalid export found in dir %s" % src_path)
6797

    
6798
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
6799
    if not export_info.has_section(constants.INISECT_EXP):
6800
      raise errors.ProgrammerError("Corrupted export config",
6801
                                   errors.ECODE_ENVIRON)
6802

    
6803
    ei_version = export_info.get(constants.INISECT_EXP, "version")
6804
    if (int(ei_version) != constants.EXPORT_VERSION):
6805
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
6806
                                 (ei_version, constants.EXPORT_VERSION),
6807
                                 errors.ECODE_ENVIRON)
6808
    return export_info
6809

    
6810
  def _ReadExportParams(self, einfo):
6811
    """Use export parameters as defaults.
6812

6813
    In case the opcode doesn't specify (as in override) some instance
6814
    parameters, then try to use them from the export information, if
6815
    that declares them.
6816

6817
    """
6818
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
6819

    
6820
    if self.op.disk_template is None:
6821
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
6822
        self.op.disk_template = einfo.get(constants.INISECT_INS,
6823
                                          "disk_template")
6824
      else:
6825
        raise errors.OpPrereqError("No disk template specified and the export"
6826
                                   " is missing the disk_template information",
6827
                                   errors.ECODE_INVAL)
6828

    
6829
    if not self.op.disks:
6830
      if einfo.has_option(constants.INISECT_INS, "disk_count"):
6831
        disks = []
6832
        # TODO: import the disk iv_name too
6833
        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
6834
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
6835
          disks.append({"size": disk_sz})
6836
        self.op.disks = disks
6837
      else:
6838
        raise errors.OpPrereqError("No disk info specified and the export"
6839
                                   " is missing the disk information",
6840
                                   errors.ECODE_INVAL)
6841

    
6842
    if (not self.op.nics and
6843
        einfo.has_option(constants.INISECT_INS, "nic_count")):
6844
      nics = []
6845
      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
6846
        ndict = {}
6847
        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
6848
          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
6849
          ndict[name] = v
6850
        nics.append(ndict)
6851
      self.op.nics = nics
6852

    
6853
    if (self.op.hypervisor is None and
6854
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
6855
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
6856
    if einfo.has_section(constants.INISECT_HYP):
6857
      # use the export parameters but do not override the ones
6858
      # specified by the user
6859
      for name, value in einfo.items(constants.INISECT_HYP):
6860
        if name not in self.op.hvparams:
6861
          self.op.hvparams[name] = value
6862

    
6863
    if einfo.has_section(constants.INISECT_BEP):
6864
      # use the parameters, without overriding
6865
      for name, value in einfo.items(constants.INISECT_BEP):
6866
        if name not in self.op.beparams:
6867
          self.op.beparams[name] = value
6868
    else:
6869
      # try to read the parameters old style, from the main section
6870
      for name in constants.BES_PARAMETERS:
6871
        if (name not in self.op.beparams and
6872
            einfo.has_option(constants.INISECT_INS, name)):
6873
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
6874

    
6875
    if einfo.has_section(constants.INISECT_OSP):
6876
      # use the parameters, without overriding
6877
      for name, value in einfo.items(constants.INISECT_OSP):
6878
        if name not in self.op.osparams:
6879
          self.op.osparams[name] = value
6880

    
6881
  def _RevertToDefaults(self, cluster):
6882
    """Revert the instance parameters to the default values.
6883

6884
    """
6885
    # hvparams
6886
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
6887
    for name in self.op.hvparams.keys():
6888
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
6889
        del self.op.hvparams[name]
6890
    # beparams
6891
    be_defs = cluster.SimpleFillBE({})
6892
    for name in self.op.beparams.keys():
6893
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
6894
        del self.op.beparams[name]
6895
    # nic params
6896
    nic_defs = cluster.SimpleFillNIC({})
6897
    for nic in self.op.nics:
6898
      for name in constants.NICS_PARAMETERS:
6899
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
6900
          del nic[name]
6901
    # osparams
6902
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
6903
    for name in self.op.osparams.keys():
6904
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
6905
        del self.op.osparams[name]
6906

    
6907
  def CheckPrereq(self):
6908
    """Check prerequisites.
6909

6910
    """
6911
    if self.op.mode == constants.INSTANCE_IMPORT:
6912
      export_info = self._ReadExportInfo()
6913
      self._ReadExportParams(export_info)
6914

    
6915
    _CheckDiskTemplate(self.op.disk_template)
6916

    
6917
    if (not self.cfg.GetVGName() and
6918
        self.op.disk_template not in constants.DTS_NOT_LVM):
6919
      raise errors.OpPrereqError("Cluster does not support lvm-based"
6920
                                 " instances", errors.ECODE_STATE)
6921

    
6922
    if self.op.hypervisor is None:
6923
      self.op.hypervisor = self.cfg.GetHypervisorType()
6924

    
6925
    cluster = self.cfg.GetClusterInfo()
6926
    enabled_hvs = cluster.enabled_hypervisors
6927
    if self.op.hypervisor not in enabled_hvs:
6928
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
6929
                                 " cluster (%s)" % (self.op.hypervisor,
6930
                                  ",".join(enabled_hvs)),
6931
                                 errors.ECODE_STATE)
6932

    
6933
    # check hypervisor parameter syntax (locally)
6934
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
6935
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
6936
                                      self.op.hvparams)
6937
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
6938
    hv_type.CheckParameterSyntax(filled_hvp)
6939
    self.hv_full = filled_hvp
6940
    # check that we don't specify global parameters on an instance
6941
    _CheckGlobalHvParams(self.op.hvparams)
6942

    
6943
    # fill and remember the beparams dict
6944
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
6945
    self.be_full = cluster.SimpleFillBE(self.op.beparams)
6946

    
6947
    # build os parameters
6948
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
6949

    
6950
    # now that hvp/bep are in final format, let's reset to defaults,
6951
    # if told to do so
6952
    if self.op.identify_defaults:
6953
      self._RevertToDefaults(cluster)
6954

    
6955
    # NIC buildup
6956
    self.nics = []
6957
    for idx, nic in enumerate(self.op.nics):
6958
      nic_mode_req = nic.get("mode", None)
6959
      nic_mode = nic_mode_req
6960
      if nic_mode is None:
6961
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
6962

    
6963
      # in routed mode, for the first nic, the default ip is 'auto'
6964
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
6965
        default_ip_mode = constants.VALUE_AUTO
6966
      else:
6967
        default_ip_mode = constants.VALUE_NONE
6968

    
6969
      # ip validity checks
6970
      ip = nic.get("ip", default_ip_mode)
6971
      if ip is None or ip.lower() == constants.VALUE_NONE:
6972
        nic_ip = None
6973
      elif ip.lower() == constants.VALUE_AUTO:
6974
        if not self.op.name_check:
6975
          raise errors.OpPrereqError("IP address set to auto but name checks"
6976
                                     " have been skipped. Aborting.",
6977
                                     errors.ECODE_INVAL)
6978
        nic_ip = self.hostname1.ip
6979
      else:
6980
        if not netutils.IsValidIP4(ip):
6981
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
6982
                                     " like a valid IP" % ip,
6983
                                     errors.ECODE_INVAL)
6984
        nic_ip = ip
6985

    
6986
      # TODO: check the ip address for uniqueness
6987
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
6988
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
6989
                                   errors.ECODE_INVAL)
6990

    
6991
      # MAC address verification
6992
      mac = nic.get("mac", constants.VALUE_AUTO)
6993
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6994
        mac = utils.NormalizeAndValidateMac(mac)
6995

    
6996
        try:
6997
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
6998
        except errors.ReservationError:
6999
          raise errors.OpPrereqError("MAC address %s already in use"
7000
                                     " in cluster" % mac,
7001
                                     errors.ECODE_NOTUNIQUE)
7002

    
7003
      # bridge verification
7004
      bridge = nic.get("bridge", None)
7005
      link = nic.get("link", None)
7006
      if bridge and link:
7007
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
7008
                                   " at the same time", errors.ECODE_INVAL)
7009
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
7010
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
7011
                                   errors.ECODE_INVAL)
7012
      elif bridge:
7013
        link = bridge
7014

    
7015
      nicparams = {}
7016
      if nic_mode_req:
7017
        nicparams[constants.NIC_MODE] = nic_mode_req
7018
      if link:
7019
        nicparams[constants.NIC_LINK] = link
7020

    
7021
      check_params = cluster.SimpleFillNIC(nicparams)
7022
      objects.NIC.CheckParameterSyntax(check_params)
7023
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
7024

    
7025
    # disk checks/pre-build
7026
    self.disks = []
7027
    for disk in self.op.disks:
7028
      mode = disk.get("mode", constants.DISK_RDWR)
7029
      if mode not in constants.DISK_ACCESS_SET:
7030
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
7031
                                   mode, errors.ECODE_INVAL)
7032
      size = disk.get("size", None)
7033
      if size is None:
7034
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
7035
      try:
7036
        size = int(size)
7037
      except (TypeError, ValueError):
7038
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
7039
                                   errors.ECODE_INVAL)
7040
      new_disk = {"size": size, "mode": mode}
7041
      if "adopt" in disk:
7042
        new_disk["adopt"] = disk["adopt"]
7043
      self.disks.append(new_disk)
7044

    
7045
    if self.op.mode == constants.INSTANCE_IMPORT:
7046

    
7047
      # Check that the new instance doesn't have less disks than the export
7048
      instance_disks = len(self.disks)
7049
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
7050
      if instance_disks < export_disks:
7051
        raise errors.OpPrereqError("Not enough disks to import."
7052
                                   " (instance: %d, export: %d)" %
7053
                                   (instance_disks, export_disks),
7054
                                   errors.ECODE_INVAL)
7055

    
7056
      disk_images = []
7057
      for idx in range(export_disks):
7058
        option = 'disk%d_dump' % idx
7059
        if export_info.has_option(constants.INISECT_INS, option):
7060
          # FIXME: are the old os-es, disk sizes, etc. useful?
7061
          export_name = export_info.get(constants.INISECT_INS, option)
7062
          image = utils.PathJoin(self.op.src_path, export_name)
7063
          disk_images.append(image)
7064
        else:
7065
          disk_images.append(False)
7066

    
7067
      self.src_images = disk_images
7068

    
7069
      old_name = export_info.get(constants.INISECT_INS, 'name')
7070
      try:
7071
        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
7072
      except (TypeError, ValueError), err:
7073
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
7074
                                   " an integer: %s" % str(err),
7075
                                   errors.ECODE_STATE)
7076
      if self.op.instance_name == old_name:
7077
        for idx, nic in enumerate(self.nics):
7078
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
7079
            nic_mac_ini = 'nic%d_mac' % idx
7080
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
7081

    
7082
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
7083

    
7084
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
7085
    if self.op.ip_check:
7086
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
7087
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
7088
                                   (self.check_ip, self.op.instance_name),
7089
                                   errors.ECODE_NOTUNIQUE)
7090

    
7091
    #### mac address generation
7092
    # By generating here the mac address both the allocator and the hooks get
7093
    # the real final mac address rather than the 'auto' or 'generate' value.
7094
    # There is a race condition between the generation and the instance object
7095
    # creation, which means that we know the mac is valid now, but we're not
7096
    # sure it will be when we actually add the instance. If things go bad
7097
    # adding the instance will abort because of a duplicate mac, and the
7098
    # creation job will fail.
7099
    for nic in self.nics:
7100
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
7101
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
7102

    
7103
    #### allocator run
7104

    
7105
    if self.op.iallocator is not None:
7106
      self._RunAllocator()
7107

    
7108
    #### node related checks
7109

    
7110
    # check primary node
7111
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
7112
    assert self.pnode is not None, \
7113
      "Cannot retrieve locked node %s" % self.op.pnode
7114
    if pnode.offline:
7115
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
7116
                                 pnode.name, errors.ECODE_STATE)
7117
    if pnode.drained:
7118
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
7119
                                 pnode.name, errors.ECODE_STATE)
7120

    
7121
    self.secondaries = []
7122

    
7123
    # mirror node verification
7124
    if self.op.disk_template in constants.DTS_NET_MIRROR:
7125
      if self.op.snode is None:
7126
        raise errors.OpPrereqError("The networked disk templates need"
7127
                                   " a mirror node", errors.ECODE_INVAL)
7128
      if self.op.snode == pnode.name:
7129
        raise errors.OpPrereqError("The secondary node cannot be the"
7130
                                   " primary node.", errors.ECODE_INVAL)
7131
      _CheckNodeOnline(self, self.op.snode)
7132
      _CheckNodeNotDrained(self, self.op.snode)
7133
      self.secondaries.append(self.op.snode)
7134

    
7135
    nodenames = [pnode.name] + self.secondaries
7136

    
7137
    req_size = _ComputeDiskSize(self.op.disk_template,
7138
                                self.disks)
7139

    
7140
    # Check lv size requirements, if not adopting
7141
    if req_size is not None and not self.adopt_disks:
7142
      _CheckNodesFreeDisk(self, nodenames, req_size)
7143

    
7144
    if self.adopt_disks: # instead, we must check the adoption data
7145
      all_lvs = set([i["adopt"] for i in self.disks])
7146
      if len(all_lvs) != len(self.disks):
7147
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
7148
                                   errors.ECODE_INVAL)
7149
      for lv_name in all_lvs:
7150
        try:
7151
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
7152
        except errors.ReservationError:
7153
          raise errors.OpPrereqError("LV named %s used by another instance" %
7154
                                     lv_name, errors.ECODE_NOTUNIQUE)
7155

    
7156
      node_lvs = self.rpc.call_lv_list([pnode.name],
7157
                                       self.cfg.GetVGName())[pnode.name]
7158
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
7159
      node_lvs = node_lvs.payload
7160
      delta = all_lvs.difference(node_lvs.keys())
7161
      if delta:
7162
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
7163
                                   utils.CommaJoin(delta),
7164
                                   errors.ECODE_INVAL)
7165
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
7166
      if online_lvs:
7167
        raise errors.OpPrereqError("Online logical volumes found, cannot"
7168
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
7169
                                   errors.ECODE_STATE)
7170
      # update the size of disk based on what is found
7171
      for dsk in self.disks:
7172
        dsk["size"] = int(float(node_lvs[dsk["adopt"]][0]))
7173

    
7174
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
7175

    
7176
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
7177
    # check OS parameters (remotely)
7178
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
7179

    
7180
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
7181

    
7182
    # memory check on primary node
7183
    if self.op.start:
7184
      _CheckNodeFreeMemory(self, self.pnode.name,
7185
                           "creating instance %s" % self.op.instance_name,
7186
                           self.be_full[constants.BE_MEMORY],
7187
                           self.op.hypervisor)
7188

    
7189
    self.dry_run_result = list(nodenames)
7190

    
7191
  def Exec(self, feedback_fn):
7192
    """Create and add the instance to the cluster.
7193

7194
    """
7195
    instance = self.op.instance_name
7196
    pnode_name = self.pnode.name
7197

    
7198
    ht_kind = self.op.hypervisor
7199
    if ht_kind in constants.HTS_REQ_PORT:
7200
      network_port = self.cfg.AllocatePort()
7201
    else:
7202
      network_port = None
7203

    
7204
    if constants.ENABLE_FILE_STORAGE:
7205
      # this is needed because os.path.join does not accept None arguments
7206
      if self.op.file_storage_dir is None:
7207
        string_file_storage_dir = ""
7208
      else:
7209
        string_file_storage_dir = self.op.file_storage_dir
7210

    
7211
      # build the full file storage dir path
7212
      file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
7213
                                        string_file_storage_dir, instance)
7214
    else:
7215
      file_storage_dir = ""
7216

    
7217
    disks = _GenerateDiskTemplate(self,
7218
                                  self.op.disk_template,
7219
                                  instance, pnode_name,
7220
                                  self.secondaries,
7221
                                  self.disks,
7222
                                  file_storage_dir,
7223
                                  self.op.file_driver,
7224
                                  0)
7225

    
7226
    iobj = objects.Instance(name=instance, os=self.op.os_type,
7227
                            primary_node=pnode_name,
7228
                            nics=self.nics, disks=disks,
7229
                            disk_template=self.op.disk_template,
7230
                            admin_up=False,
7231
                            network_port=network_port,
7232
                            beparams=self.op.beparams,
7233
                            hvparams=self.op.hvparams,
7234
                            hypervisor=self.op.hypervisor,
7235
                            osparams=self.op.osparams,
7236
                            )
7237

    
7238
    if self.adopt_disks:
7239
      # rename LVs to the newly-generated names; we need to construct
7240
      # 'fake' LV disks with the old data, plus the new unique_id
7241
      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
7242
      rename_to = []
7243
      for t_dsk, a_dsk in zip (tmp_disks, self.disks):
7244
        rename_to.append(t_dsk.logical_id)
7245
        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
7246
        self.cfg.SetDiskID(t_dsk, pnode_name)
7247
      result = self.rpc.call_blockdev_rename(pnode_name,
7248
                                             zip(tmp_disks, rename_to))
7249
      result.Raise("Failed to rename adoped LVs")
7250
    else:
7251
      feedback_fn("* creating instance disks...")
7252
      try:
7253
        _CreateDisks(self, iobj)
7254
      except errors.OpExecError:
7255
        self.LogWarning("Device creation failed, reverting...")
7256
        try:
7257
          _RemoveDisks(self, iobj)
7258
        finally:
7259
          self.cfg.ReleaseDRBDMinors(instance)
7260
          raise
7261

    
7262
    feedback_fn("adding instance %s to cluster config" % instance)
7263

    
7264
    self.cfg.AddInstance(iobj, self.proc.GetECId())
7265

    
7266
    # Declare that we don't want to remove the instance lock anymore, as we've
7267
    # added the instance to the config
7268
    del self.remove_locks[locking.LEVEL_INSTANCE]
7269
    # Unlock all the nodes
7270
    if self.op.mode == constants.INSTANCE_IMPORT:
7271
      nodes_keep = [self.op.src_node]
7272
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
7273
                       if node != self.op.src_node]
7274
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
7275
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
7276
    else:
7277
      self.context.glm.release(locking.LEVEL_NODE)
7278
      del self.acquired_locks[locking.LEVEL_NODE]
7279

    
7280
    if self.op.wait_for_sync:
7281
      disk_abort = not _WaitForSync(self, iobj)
7282
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
7283
      # make sure the disks are not degraded (still sync-ing is ok)
7284
      time.sleep(15)
7285
      feedback_fn("* checking mirrors status")
7286
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
7287
    else:
7288
      disk_abort = False
7289

    
7290
    if disk_abort:
7291
      _RemoveDisks(self, iobj)
7292
      self.cfg.RemoveInstance(iobj.name)
7293
      # Make sure the instance lock gets removed
7294
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
7295
      raise errors.OpExecError("There are some degraded disks for"
7296
                               " this instance")
7297

    
7298
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
7299
      if self.op.mode == constants.INSTANCE_CREATE:
7300
        if not self.op.no_install:
7301
          feedback_fn("* running the instance OS create scripts...")
7302
          # FIXME: pass debug option from opcode to backend
7303
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
7304
                                                 self.op.debug_level)
7305
          result.Raise("Could not add os for instance %s"
7306
                       " on node %s" % (instance, pnode_name))
7307

    
7308
      elif self.op.mode == constants.INSTANCE_IMPORT:
7309
        feedback_fn("* running the instance OS import scripts...")
7310

    
7311
        transfers = []
7312

    
7313
        for idx, image in enumerate(self.src_images):
7314
          if not image:
7315
            continue
7316

    
7317
          # FIXME: pass debug option from opcode to backend
7318
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
7319
                                             constants.IEIO_FILE, (image, ),
7320
                                             constants.IEIO_SCRIPT,
7321
                                             (iobj.disks[idx], idx),
7322
                                             None)
7323
          transfers.append(dt)
7324

    
7325
        import_result = \
7326
          masterd.instance.TransferInstanceData(self, feedback_fn,
7327
                                                self.op.src_node, pnode_name,
7328
                                                self.pnode.secondary_ip,
7329
                                                iobj, transfers)
7330
        if not compat.all(import_result):
7331
          self.LogWarning("Some disks for instance %s on node %s were not"
7332
                          " imported successfully" % (instance, pnode_name))
7333

    
7334
      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7335
        feedback_fn("* preparing remote import...")
7336
        connect_timeout = constants.RIE_CONNECT_TIMEOUT
7337
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
7338

    
7339
        disk_results = masterd.instance.RemoteImport(self, feedback_fn, iobj,
7340
                                                     self.source_x509_ca,
7341
                                                     self._cds, timeouts)
7342
        if not compat.all(disk_results):
7343
          # TODO: Should the instance still be started, even if some disks
7344
          # failed to import (valid for local imports, too)?
7345
          self.LogWarning("Some disks for instance %s on node %s were not"
7346
                          " imported successfully" % (instance, pnode_name))
7347

    
7348
        # Run rename script on newly imported instance
7349
        assert iobj.name == instance
7350
        feedback_fn("Running rename script for %s" % instance)
7351
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
7352
                                                   self.source_instance_name,
7353
                                                   self.op.debug_level)
7354
        if result.fail_msg:
7355
          self.LogWarning("Failed to run rename script for %s on node"
7356
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
7357

    
7358
      else:
7359
        # also checked in the prereq part
7360
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
7361
                                     % self.op.mode)
7362

    
7363
    if self.op.start:
7364
      iobj.admin_up = True
7365
      self.cfg.Update(iobj, feedback_fn)
7366
      logging.info("Starting instance %s on node %s", instance, pnode_name)
7367
      feedback_fn("* starting instance...")
7368
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
7369
      result.Raise("Could not start instance")
7370

    
7371
    return list(iobj.all_nodes)
7372

    
7373

    
7374
class LUConnectConsole(NoHooksLU):
7375
  """Connect to an instance's console.
7376

7377
  This is somewhat special in that it returns the command line that
7378
  you need to run on the master node in order to connect to the
7379
  console.
7380

7381
  """
7382
  _OP_PARAMS = [
7383
    _PInstanceName
7384
    ]
7385
  REQ_BGL = False
7386

    
7387
  def ExpandNames(self):
7388
    self._ExpandAndLockInstance()
7389

    
7390
  def CheckPrereq(self):
7391
    """Check prerequisites.
7392

7393
    This checks that the instance is in the cluster.
7394

7395
    """
7396
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7397
    assert self.instance is not None, \
7398
      "Cannot retrieve locked instance %s" % self.op.instance_name
7399
    _CheckNodeOnline(self, self.instance.primary_node)
7400

    
7401
  def Exec(self, feedback_fn):
7402
    """Connect to the console of an instance
7403

7404
    """
7405
    instance = self.instance
7406
    node = instance.primary_node
7407

    
7408
    node_insts = self.rpc.call_instance_list([node],
7409
                                             [instance.hypervisor])[node]
7410
    node_insts.Raise("Can't get node information from %s" % node)
7411

    
7412
    if instance.name not in node_insts.payload:
7413
      raise errors.OpExecError("Instance %s is not running." % instance.name)
7414

    
7415
    logging.debug("Connecting to console of %s on %s", instance.name, node)
7416

    
7417
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
7418
    cluster = self.cfg.GetClusterInfo()
7419
    # beparams and hvparams are passed separately, to avoid editing the
7420
    # instance and then saving the defaults in the instance itself.
7421
    hvparams = cluster.FillHV(instance)
7422
    beparams = cluster.FillBE(instance)
7423
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
7424

    
7425
    # build ssh cmdline
7426
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
7427

    
7428

    
7429
class LUReplaceDisks(LogicalUnit):
7430
  """Replace the disks of an instance.
7431

7432
  """
7433
  HPATH = "mirrors-replace"
7434
  HTYPE = constants.HTYPE_INSTANCE
7435
  _OP_PARAMS = [
7436
    _PInstanceName,
7437
    ("mode", _NoDefault, _TElemOf(constants.REPLACE_MODES)),
7438
    ("disks", _EmptyList, _TListOf(_TPositiveInt)),
7439
    ("remote_node", None, _TMaybeString),
7440
    ("iallocator", None, _TMaybeString),
7441
    ("early_release", False, _TBool),
7442
    ]
7443
  REQ_BGL = False
7444

    
7445
  def CheckArguments(self):
7446
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
7447
                                  self.op.iallocator)
7448

    
7449
  def ExpandNames(self):
7450
    self._ExpandAndLockInstance()
7451

    
7452
    if self.op.iallocator is not None:
7453
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
7454

    
7455
    elif self.op.remote_node is not None:
7456
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7457
      self.op.remote_node = remote_node
7458

    
7459
      # Warning: do not remove the locking of the new secondary here
7460
      # unless DRBD8.AddChildren is changed to work in parallel;
7461
      # currently it doesn't since parallel invocations of
7462
      # FindUnusedMinor will conflict
7463
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
7464
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7465

    
7466
    else:
7467
      self.needed_locks[locking.LEVEL_NODE] = []
7468
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7469

    
7470
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
7471
                                   self.op.iallocator, self.op.remote_node,
7472
                                   self.op.disks, False, self.op.early_release)
7473

    
7474
    self.tasklets = [self.replacer]
7475

    
7476
  def DeclareLocks(self, level):
7477
    # If we're not already locking all nodes in the set we have to declare the
7478
    # instance's primary/secondary nodes.
7479
    if (level == locking.LEVEL_NODE and
7480
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
7481
      self._LockInstancesNodes()
7482

    
7483
  def BuildHooksEnv(self):
7484
    """Build hooks env.
7485

7486
    This runs on the master, the primary and all the secondaries.
7487

7488
    """
7489
    instance = self.replacer.instance
7490
    env = {
7491
      "MODE": self.op.mode,
7492
      "NEW_SECONDARY": self.op.remote_node,
7493
      "OLD_SECONDARY": instance.secondary_nodes[0],
7494
      }
7495
    env.update(_BuildInstanceHookEnvByObject(self, instance))
7496
    nl = [
7497
      self.cfg.GetMasterNode(),
7498
      instance.primary_node,
7499
      ]
7500
    if self.op.remote_node is not None:
7501
      nl.append(self.op.remote_node)
7502
    return env, nl, nl
7503

    
7504

    
7505
class TLReplaceDisks(Tasklet):
7506
  """Replaces disks for an instance.
7507

7508
  Note: Locking is not within the scope of this class.
7509

7510
  """
7511
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
7512
               disks, delay_iallocator, early_release):
7513
    """Initializes this class.
7514

7515
    """
7516
    Tasklet.__init__(self, lu)
7517

    
7518
    # Parameters
7519
    self.instance_name = instance_name
7520
    self.mode = mode
7521
    self.iallocator_name = iallocator_name
7522
    self.remote_node = remote_node
7523
    self.disks = disks
7524
    self.delay_iallocator = delay_iallocator
7525
    self.early_release = early_release
7526

    
7527
    # Runtime data
7528
    self.instance = None
7529
    self.new_node = None
7530
    self.target_node = None
7531
    self.other_node = None
7532
    self.remote_node_info = None
7533
    self.node_secondary_ip = None
7534

    
7535
  @staticmethod
7536
  def CheckArguments(mode, remote_node, iallocator):
7537
    """Helper function for users of this class.
7538

7539
    """
7540
    # check for valid parameter combination
7541
    if mode == constants.REPLACE_DISK_CHG:
7542
      if remote_node is None and iallocator is None:
7543
        raise errors.OpPrereqError("When changing the secondary either an"
7544
                                   " iallocator script must be used or the"
7545
                                   " new node given", errors.ECODE_INVAL)
7546

    
7547
      if remote_node is not None and iallocator is not None:
7548
        raise errors.OpPrereqError("Give either the iallocator or the new"
7549
                                   " secondary, not both", errors.ECODE_INVAL)
7550

    
7551
    elif remote_node is not None or iallocator is not None:
7552
      # Not replacing the secondary
7553
      raise errors.OpPrereqError("The iallocator and new node options can"
7554
                                 " only be used when changing the"
7555
                                 " secondary node", errors.ECODE_INVAL)
7556

    
7557
  @staticmethod
7558
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
7559
    """Compute a new secondary node using an IAllocator.
7560

7561
    """
7562
    ial = IAllocator(lu.cfg, lu.rpc,
7563
                     mode=constants.IALLOCATOR_MODE_RELOC,
7564
                     name=instance_name,
7565
                     relocate_from=relocate_from)
7566

    
7567
    ial.Run(iallocator_name)
7568

    
7569
    if not ial.success:
7570
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
7571
                                 " %s" % (iallocator_name, ial.info),
7572
                                 errors.ECODE_NORES)
7573

    
7574
    if len(ial.result) != ial.required_nodes:
7575
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7576
                                 " of nodes (%s), required %s" %
7577
                                 (iallocator_name,
7578
                                  len(ial.result), ial.required_nodes),
7579
                                 errors.ECODE_FAULT)
7580

    
7581
    remote_node_name = ial.result[0]
7582

    
7583
    lu.LogInfo("Selected new secondary for instance '%s': %s",
7584
               instance_name, remote_node_name)
7585

    
7586
    return remote_node_name
7587

    
7588
  def _FindFaultyDisks(self, node_name):
7589
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
7590
                                    node_name, True)
7591

    
7592
  def CheckPrereq(self):
7593
    """Check prerequisites.
7594

7595
    This checks that the instance is in the cluster.
7596

7597
    """
7598
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
7599
    assert instance is not None, \
7600
      "Cannot retrieve locked instance %s" % self.instance_name
7601

    
7602
    if instance.disk_template != constants.DT_DRBD8:
7603
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
7604
                                 " instances", errors.ECODE_INVAL)
7605

    
7606
    if len(instance.secondary_nodes) != 1:
7607
      raise errors.OpPrereqError("The instance has a strange layout,"
7608
                                 " expected one secondary but found %d" %
7609
                                 len(instance.secondary_nodes),
7610
                                 errors.ECODE_FAULT)
7611

    
7612
    if not self.delay_iallocator:
7613
      self._CheckPrereq2()
7614

    
7615
  def _CheckPrereq2(self):
7616
    """Check prerequisites, second part.
7617

7618
    This function should always be part of CheckPrereq. It was separated and is
7619
    now called from Exec because during node evacuation iallocator was only
7620
    called with an unmodified cluster model, not taking planned changes into
7621
    account.
7622

7623
    """
7624
    instance = self.instance
7625
    secondary_node = instance.secondary_nodes[0]
7626

    
7627
    if self.iallocator_name is None:
7628
      remote_node = self.remote_node
7629
    else:
7630
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
7631
                                       instance.name, instance.secondary_nodes)
7632

    
7633
    if remote_node is not None:
7634
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
7635
      assert self.remote_node_info is not None, \
7636
        "Cannot retrieve locked node %s" % remote_node
7637
    else:
7638
      self.remote_node_info = None
7639

    
7640
    if remote_node == self.instance.primary_node:
7641
      raise errors.OpPrereqError("The specified node is the primary node of"
7642
                                 " the instance.", errors.ECODE_INVAL)
7643

    
7644
    if remote_node == secondary_node:
7645
      raise errors.OpPrereqError("The specified node is already the"
7646
                                 " secondary node of the instance.",
7647
                                 errors.ECODE_INVAL)
7648

    
7649
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
7650
                                    constants.REPLACE_DISK_CHG):
7651
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
7652
                                 errors.ECODE_INVAL)
7653

    
7654
    if self.mode == constants.REPLACE_DISK_AUTO:
7655
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
7656
      faulty_secondary = self._FindFaultyDisks(secondary_node)
7657

    
7658
      if faulty_primary and faulty_secondary:
7659
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
7660
                                   " one node and can not be repaired"
7661
                                   " automatically" % self.instance_name,
7662
                                   errors.ECODE_STATE)
7663

    
7664
      if faulty_primary:
7665
        self.disks = faulty_primary
7666
        self.target_node = instance.primary_node
7667
        self.other_node = secondary_node
7668
        check_nodes = [self.target_node, self.other_node]
7669
      elif faulty_secondary:
7670
        self.disks = faulty_secondary
7671
        self.target_node = secondary_node
7672
        self.other_node = instance.primary_node
7673
        check_nodes = [self.target_node, self.other_node]
7674
      else:
7675
        self.disks = []
7676
        check_nodes = []
7677

    
7678
    else:
7679
      # Non-automatic modes
7680
      if self.mode == constants.REPLACE_DISK_PRI:
7681
        self.target_node = instance.primary_node
7682
        self.other_node = secondary_node
7683
        check_nodes = [self.target_node, self.other_node]
7684

    
7685
      elif self.mode == constants.REPLACE_DISK_SEC:
7686
        self.target_node = secondary_node
7687
        self.other_node = instance.primary_node
7688
        check_nodes = [self.target_node, self.other_node]
7689

    
7690
      elif self.mode == constants.REPLACE_DISK_CHG:
7691
        self.new_node = remote_node
7692
        self.other_node = instance.primary_node
7693
        self.target_node = secondary_node
7694
        check_nodes = [self.new_node, self.other_node]
7695

    
7696
        _CheckNodeNotDrained(self.lu, remote_node)
7697

    
7698
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
7699
        assert old_node_info is not None
7700
        if old_node_info.offline and not self.early_release:
7701
          # doesn't make sense to delay the release
7702
          self.early_release = True
7703
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
7704
                          " early-release mode", secondary_node)
7705

    
7706
      else:
7707
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
7708
                                     self.mode)
7709

    
7710
      # If not specified all disks should be replaced
7711
      if not self.disks:
7712
        self.disks = range(len(self.instance.disks))
7713

    
7714
    for node in check_nodes:
7715
      _CheckNodeOnline(self.lu, node)
7716

    
7717
    # Check whether disks are valid
7718
    for disk_idx in self.disks:
7719
      instance.FindDisk(disk_idx)
7720

    
7721
    # Get secondary node IP addresses
7722
    node_2nd_ip = {}
7723

    
7724
    for node_name in [self.target_node, self.other_node, self.new_node]:
7725
      if node_name is not None:
7726
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
7727

    
7728
    self.node_secondary_ip = node_2nd_ip
7729

    
7730
  def Exec(self, feedback_fn):
7731
    """Execute disk replacement.
7732

7733
    This dispatches the disk replacement to the appropriate handler.
7734

7735
    """
7736
    if self.delay_iallocator:
7737
      self._CheckPrereq2()
7738

    
7739
    if not self.disks:
7740
      feedback_fn("No disks need replacement")
7741
      return
7742

    
7743
    feedback_fn("Replacing disk(s) %s for %s" %
7744
                (utils.CommaJoin(self.disks), self.instance.name))
7745

    
7746
    activate_disks = (not self.instance.admin_up)
7747

    
7748
    # Activate the instance disks if we're replacing them on a down instance
7749
    if activate_disks:
7750
      _StartInstanceDisks(self.lu, self.instance, True)
7751

    
7752
    try:
7753
      # Should we replace the secondary node?
7754
      if self.new_node is not None:
7755
        fn = self._ExecDrbd8Secondary
7756
      else:
7757
        fn = self._ExecDrbd8DiskOnly
7758

    
7759
      return fn(feedback_fn)
7760

    
7761
    finally:
7762
      # Deactivate the instance disks if we're replacing them on a
7763
      # down instance
7764
      if activate_disks:
7765
        _SafeShutdownInstanceDisks(self.lu, self.instance)
7766

    
7767
  def _CheckVolumeGroup(self, nodes):
7768
    self.lu.LogInfo("Checking volume groups")
7769

    
7770
    vgname = self.cfg.GetVGName()
7771

    
7772
    # Make sure volume group exists on all involved nodes
7773
    results = self.rpc.call_vg_list(nodes)
7774
    if not results:
7775
      raise errors.OpExecError("Can't list volume groups on the nodes")
7776

    
7777
    for node in nodes:
7778
      res = results[node]
7779
      res.Raise("Error checking node %s" % node)
7780
      if vgname not in res.payload:
7781
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
7782
                                 (vgname, node))
7783

    
7784
  def _CheckDisksExistence(self, nodes):
7785
    # Check disk existence
7786
    for idx, dev in enumerate(self.instance.disks):
7787
      if idx not in self.disks:
7788
        continue
7789

    
7790
      for node in nodes:
7791
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
7792
        self.cfg.SetDiskID(dev, node)
7793

    
7794
        result = self.rpc.call_blockdev_find(node, dev)
7795

    
7796
        msg = result.fail_msg
7797
        if msg or not result.payload:
7798
          if not msg:
7799
            msg = "disk not found"
7800
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
7801
                                   (idx, node, msg))
7802

    
7803
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
7804
    for idx, dev in enumerate(self.instance.disks):
7805
      if idx not in self.disks:
7806
        continue
7807

    
7808
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
7809
                      (idx, node_name))
7810

    
7811
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
7812
                                   ldisk=ldisk):
7813
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
7814
                                 " replace disks for instance %s" %
7815
                                 (node_name, self.instance.name))
7816

    
7817
  def _CreateNewStorage(self, node_name):
7818
    vgname = self.cfg.GetVGName()
7819
    iv_names = {}
7820

    
7821
    for idx, dev in enumerate(self.instance.disks):
7822
      if idx not in self.disks:
7823
        continue
7824

    
7825
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
7826

    
7827
      self.cfg.SetDiskID(dev, node_name)
7828

    
7829
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
7830
      names = _GenerateUniqueNames(self.lu, lv_names)
7831

    
7832
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
7833
                             logical_id=(vgname, names[0]))
7834
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
7835
                             logical_id=(vgname, names[1]))
7836

    
7837
      new_lvs = [lv_data, lv_meta]
7838
      old_lvs = dev.children
7839
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
7840

    
7841
      # we pass force_create=True to force the LVM creation
7842
      for new_lv in new_lvs:
7843
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
7844
                        _GetInstanceInfoText(self.instance), False)
7845

    
7846
    return iv_names
7847

    
7848
  def _CheckDevices(self, node_name, iv_names):
7849
    for name, (dev, _, _) in iv_names.iteritems():
7850
      self.cfg.SetDiskID(dev, node_name)
7851

    
7852
      result = self.rpc.call_blockdev_find(node_name, dev)
7853

    
7854
      msg = result.fail_msg
7855
      if msg or not result.payload:
7856
        if not msg:
7857
          msg = "disk not found"
7858
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
7859
                                 (name, msg))
7860

    
7861
      if result.payload.is_degraded:
7862
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
7863

    
7864
  def _RemoveOldStorage(self, node_name, iv_names):
7865
    for name, (_, old_lvs, _) in iv_names.iteritems():
7866
      self.lu.LogInfo("Remove logical volumes for %s" % name)
7867

    
7868
      for lv in old_lvs:
7869
        self.cfg.SetDiskID(lv, node_name)
7870

    
7871
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
7872
        if msg:
7873
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
7874
                             hint="remove unused LVs manually")
7875

    
7876
  def _ReleaseNodeLock(self, node_name):
7877
    """Releases the lock for a given node."""
7878
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
7879

    
7880
  def _ExecDrbd8DiskOnly(self, feedback_fn):
7881
    """Replace a disk on the primary or secondary for DRBD 8.
7882

7883
    The algorithm for replace is quite complicated:
7884

7885
      1. for each disk to be replaced:
7886

7887
        1. create new LVs on the target node with unique names
7888
        1. detach old LVs from the drbd device
7889
        1. rename old LVs to name_replaced.<time_t>
7890
        1. rename new LVs to old LVs
7891
        1. attach the new LVs (with the old names now) to the drbd device
7892

7893
      1. wait for sync across all devices
7894

7895
      1. for each modified disk:
7896

7897
        1. remove old LVs (which have the name name_replaces.<time_t>)
7898

7899
    Failures are not very well handled.
7900

7901
    """
7902
    steps_total = 6
7903

    
7904
    # Step: check device activation
7905
    self.lu.LogStep(1, steps_total, "Check device existence")
7906
    self._CheckDisksExistence([self.other_node, self.target_node])
7907
    self._CheckVolumeGroup([self.target_node, self.other_node])
7908

    
7909
    # Step: check other node consistency
7910
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7911
    self._CheckDisksConsistency(self.other_node,
7912
                                self.other_node == self.instance.primary_node,
7913
                                False)
7914

    
7915
    # Step: create new storage
7916
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7917
    iv_names = self._CreateNewStorage(self.target_node)
7918

    
7919
    # Step: for each lv, detach+rename*2+attach
7920
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7921
    for dev, old_lvs, new_lvs in iv_names.itervalues():
7922
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
7923

    
7924
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
7925
                                                     old_lvs)
7926
      result.Raise("Can't detach drbd from local storage on node"
7927
                   " %s for device %s" % (self.target_node, dev.iv_name))
7928
      #dev.children = []
7929
      #cfg.Update(instance)
7930

    
7931
      # ok, we created the new LVs, so now we know we have the needed
7932
      # storage; as such, we proceed on the target node to rename
7933
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
7934
      # using the assumption that logical_id == physical_id (which in
7935
      # turn is the unique_id on that node)
7936

    
7937
      # FIXME(iustin): use a better name for the replaced LVs
7938
      temp_suffix = int(time.time())
7939
      ren_fn = lambda d, suff: (d.physical_id[0],
7940
                                d.physical_id[1] + "_replaced-%s" % suff)
7941

    
7942
      # Build the rename list based on what LVs exist on the node
7943
      rename_old_to_new = []
7944
      for to_ren in old_lvs:
7945
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
7946
        if not result.fail_msg and result.payload:
7947
          # device exists
7948
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
7949

    
7950
      self.lu.LogInfo("Renaming the old LVs on the target node")
7951
      result = self.rpc.call_blockdev_rename(self.target_node,
7952
                                             rename_old_to_new)
7953
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
7954

    
7955
      # Now we rename the new LVs to the old LVs
7956
      self.lu.LogInfo("Renaming the new LVs on the target node")
7957
      rename_new_to_old = [(new, old.physical_id)
7958
                           for old, new in zip(old_lvs, new_lvs)]
7959
      result = self.rpc.call_blockdev_rename(self.target_node,
7960
                                             rename_new_to_old)
7961
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
7962

    
7963
      for old, new in zip(old_lvs, new_lvs):
7964
        new.logical_id = old.logical_id
7965
        self.cfg.SetDiskID(new, self.target_node)
7966

    
7967
      for disk in old_lvs:
7968
        disk.logical_id = ren_fn(disk, temp_suffix)
7969
        self.cfg.SetDiskID(disk, self.target_node)
7970

    
7971
      # Now that the new lvs have the old name, we can add them to the device
7972
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
7973
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
7974
                                                  new_lvs)
7975
      msg = result.fail_msg
7976
      if msg:
7977
        for new_lv in new_lvs:
7978
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
7979
                                               new_lv).fail_msg
7980
          if msg2:
7981
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
7982
                               hint=("cleanup manually the unused logical"
7983
                                     "volumes"))
7984
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
7985

    
7986
      dev.children = new_lvs
7987

    
7988
      self.cfg.Update(self.instance, feedback_fn)
7989

    
7990
    cstep = 5
7991
    if self.early_release:
7992
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7993
      cstep += 1
7994
      self._RemoveOldStorage(self.target_node, iv_names)
7995
      # WARNING: we release both node locks here, do not do other RPCs
7996
      # than WaitForSync to the primary node
7997
      self._ReleaseNodeLock([self.target_node, self.other_node])
7998

    
7999
    # Wait for sync
8000
    # This can fail as the old devices are degraded and _WaitForSync
8001
    # does a combined result over all disks, so we don't check its return value
8002
    self.lu.LogStep(cstep, steps_total, "Sync devices")
8003
    cstep += 1
8004
    _WaitForSync(self.lu, self.instance)
8005

    
8006
    # Check all devices manually
8007
    self._CheckDevices(self.instance.primary_node, iv_names)
8008

    
8009
    # Step: remove old storage
8010
    if not self.early_release:
8011
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
8012
      cstep += 1
8013
      self._RemoveOldStorage(self.target_node, iv_names)
8014

    
8015
  def _ExecDrbd8Secondary(self, feedback_fn):
8016
    """Replace the secondary node for DRBD 8.
8017

8018
    The algorithm for replace is quite complicated:
8019
      - for all disks of the instance:
8020
        - create new LVs on the new node with same names
8021
        - shutdown the drbd device on the old secondary
8022
        - disconnect the drbd network on the primary
8023
        - create the drbd device on the new secondary
8024
        - network attach the drbd on the primary, using an artifice:
8025
          the drbd code for Attach() will connect to the network if it
8026
          finds a device which is connected to the good local disks but
8027
          not network enabled
8028
      - wait for sync across all devices
8029
      - remove all disks from the old secondary
8030

8031
    Failures are not very well handled.
8032

8033
    """
8034
    steps_total = 6
8035

    
8036
    # Step: check device activation
8037
    self.lu.LogStep(1, steps_total, "Check device existence")
8038
    self._CheckDisksExistence([self.instance.primary_node])
8039
    self._CheckVolumeGroup([self.instance.primary_node])
8040

    
8041
    # Step: check other node consistency
8042
    self.lu.LogStep(2, steps_total, "Check peer consistency")
8043
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
8044

    
8045
    # Step: create new storage
8046
    self.lu.LogStep(3, steps_total, "Allocate new storage")
8047
    for idx, dev in enumerate(self.instance.disks):
8048
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
8049
                      (self.new_node, idx))
8050
      # we pass force_create=True to force LVM creation
8051
      for new_lv in dev.children:
8052
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
8053
                        _GetInstanceInfoText(self.instance), False)
8054

    
8055
    # Step 4: dbrd minors and drbd setups changes
8056
    # after this, we must manually remove the drbd minors on both the
8057
    # error and the success paths
8058
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
8059
    minors = self.cfg.AllocateDRBDMinor([self.new_node
8060
                                         for dev in self.instance.disks],
8061
                                        self.instance.name)
8062
    logging.debug("Allocated minors %r", minors)
8063

    
8064
    iv_names = {}
8065
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
8066
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
8067
                      (self.new_node, idx))
8068
      # create new devices on new_node; note that we create two IDs:
8069
      # one without port, so the drbd will be activated without
8070
      # networking information on the new node at this stage, and one
8071
      # with network, for the latter activation in step 4
8072
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
8073
      if self.instance.primary_node == o_node1:
8074
        p_minor = o_minor1
8075
      else:
8076
        assert self.instance.primary_node == o_node2, "Three-node instance?"
8077
        p_minor = o_minor2
8078

    
8079
      new_alone_id = (self.instance.primary_node, self.new_node, None,
8080
                      p_minor, new_minor, o_secret)
8081
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
8082
                    p_minor, new_minor, o_secret)
8083

    
8084
      iv_names[idx] = (dev, dev.children, new_net_id)
8085
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
8086
                    new_net_id)
8087
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
8088
                              logical_id=new_alone_id,
8089
                              children=dev.children,
8090
                              size=dev.size)
8091
      try:
8092
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
8093
                              _GetInstanceInfoText(self.instance), False)
8094
      except errors.GenericError:
8095
        self.cfg.ReleaseDRBDMinors(self.instance.name)
8096
        raise
8097

    
8098
    # We have new devices, shutdown the drbd on the old secondary
8099
    for idx, dev in enumerate(self.instance.disks):
8100
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
8101
      self.cfg.SetDiskID(dev, self.target_node)
8102
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
8103
      if msg:
8104
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
8105
                           "node: %s" % (idx, msg),
8106
                           hint=("Please cleanup this device manually as"
8107
                                 " soon as possible"))
8108

    
8109
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
8110
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
8111
                                               self.node_secondary_ip,
8112
                                               self.instance.disks)\
8113
                                              [self.instance.primary_node]
8114

    
8115
    msg = result.fail_msg
8116
    if msg:
8117
      # detaches didn't succeed (unlikely)
8118
      self.cfg.ReleaseDRBDMinors(self.instance.name)
8119
      raise errors.OpExecError("Can't detach the disks from the network on"
8120
                               " old node: %s" % (msg,))
8121

    
8122
    # if we managed to detach at least one, we update all the disks of
8123
    # the instance to point to the new secondary
8124
    self.lu.LogInfo("Updating instance configuration")
8125
    for dev, _, new_logical_id in iv_names.itervalues():
8126
      dev.logical_id = new_logical_id
8127
      self.cfg.SetDiskID(dev, self.instance.primary_node)
8128

    
8129
    self.cfg.Update(self.instance, feedback_fn)
8130

    
8131
    # and now perform the drbd attach
8132
    self.lu.LogInfo("Attaching primary drbds to new secondary"
8133
                    " (standalone => connected)")
8134
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
8135
                                            self.new_node],
8136
                                           self.node_secondary_ip,
8137
                                           self.instance.disks,
8138
                                           self.instance.name,
8139
                                           False)
8140
    for to_node, to_result in result.items():
8141
      msg = to_result.fail_msg
8142
      if msg:
8143
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
8144
                           to_node, msg,
8145
                           hint=("please do a gnt-instance info to see the"
8146
                                 " status of disks"))
8147
    cstep = 5
8148
    if self.early_release:
8149
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
8150
      cstep += 1
8151
      self._RemoveOldStorage(self.target_node, iv_names)
8152
      # WARNING: we release all node locks here, do not do other RPCs
8153
      # than WaitForSync to the primary node
8154
      self._ReleaseNodeLock([self.instance.primary_node,
8155
                             self.target_node,
8156
                             self.new_node])
8157

    
8158
    # Wait for sync
8159
    # This can fail as the old devices are degraded and _WaitForSync
8160
    # does a combined result over all disks, so we don't check its return value
8161
    self.lu.LogStep(cstep, steps_total, "Sync devices")
8162
    cstep += 1
8163
    _WaitForSync(self.lu, self.instance)
8164

    
8165
    # Check all devices manually
8166
    self._CheckDevices(self.instance.primary_node, iv_names)
8167

    
8168
    # Step: remove old storage
8169
    if not self.early_release:
8170
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
8171
      self._RemoveOldStorage(self.target_node, iv_names)
8172

    
8173

    
8174
class LURepairNodeStorage(NoHooksLU):
8175
  """Repairs the volume group on a node.
8176

8177
  """
8178
  _OP_PARAMS = [
8179
    _PNodeName,
8180
    ("storage_type", _NoDefault, _CheckStorageType),
8181
    ("name", _NoDefault, _TNonEmptyString),
8182
    ("ignore_consistency", False, _TBool),
8183
    ]
8184
  REQ_BGL = False
8185

    
8186
  def CheckArguments(self):
8187
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
8188

    
8189
    storage_type = self.op.storage_type
8190

    
8191
    if (constants.SO_FIX_CONSISTENCY not in
8192
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
8193
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
8194
                                 " repaired" % storage_type,
8195
                                 errors.ECODE_INVAL)
8196

    
8197
  def ExpandNames(self):
8198
    self.needed_locks = {
8199
      locking.LEVEL_NODE: [self.op.node_name],
8200
      }
8201

    
8202
  def _CheckFaultyDisks(self, instance, node_name):
8203
    """Ensure faulty disks abort the opcode or at least warn."""
8204
    try:
8205
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
8206
                                  node_name, True):
8207
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
8208
                                   " node '%s'" % (instance.name, node_name),
8209
                                   errors.ECODE_STATE)
8210
    except errors.OpPrereqError, err:
8211
      if self.op.ignore_consistency:
8212
        self.proc.LogWarning(str(err.args[0]))
8213
      else:
8214
        raise
8215

    
8216
  def CheckPrereq(self):
8217
    """Check prerequisites.
8218

8219
    """
8220
    # Check whether any instance on this node has faulty disks
8221
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
8222
      if not inst.admin_up:
8223
        continue
8224
      check_nodes = set(inst.all_nodes)
8225
      check_nodes.discard(self.op.node_name)
8226
      for inst_node_name in check_nodes:
8227
        self._CheckFaultyDisks(inst, inst_node_name)
8228

    
8229
  def Exec(self, feedback_fn):
8230
    feedback_fn("Repairing storage unit '%s' on %s ..." %
8231
                (self.op.name, self.op.node_name))
8232

    
8233
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
8234
    result = self.rpc.call_storage_execute(self.op.node_name,
8235
                                           self.op.storage_type, st_args,
8236
                                           self.op.name,
8237
                                           constants.SO_FIX_CONSISTENCY)
8238
    result.Raise("Failed to repair storage unit '%s' on %s" %
8239
                 (self.op.name, self.op.node_name))
8240

    
8241

    
8242
class LUNodeEvacuationStrategy(NoHooksLU):
8243
  """Computes the node evacuation strategy.
8244

8245
  """
8246
  _OP_PARAMS = [
8247
    ("nodes", _NoDefault, _TListOf(_TNonEmptyString)),
8248
    ("remote_node", None, _TMaybeString),
8249
    ("iallocator", None, _TMaybeString),
8250
    ]
8251
  REQ_BGL = False
8252

    
8253
  def CheckArguments(self):
8254
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
8255

    
8256
  def ExpandNames(self):
8257
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
8258
    self.needed_locks = locks = {}
8259
    if self.op.remote_node is None:
8260
      locks[locking.LEVEL_NODE] = locking.ALL_SET
8261
    else:
8262
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8263
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
8264

    
8265
  def Exec(self, feedback_fn):
8266
    if self.op.remote_node is not None:
8267
      instances = []
8268
      for node in self.op.nodes:
8269
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
8270
      result = []
8271
      for i in instances:
8272
        if i.primary_node == self.op.remote_node:
8273
          raise errors.OpPrereqError("Node %s is the primary node of"
8274
                                     " instance %s, cannot use it as"
8275
                                     " secondary" %
8276
                                     (self.op.remote_node, i.name),
8277
                                     errors.ECODE_INVAL)
8278
        result.append([i.name, self.op.remote_node])
8279
    else:
8280
      ial = IAllocator(self.cfg, self.rpc,
8281
                       mode=constants.IALLOCATOR_MODE_MEVAC,
8282
                       evac_nodes=self.op.nodes)
8283
      ial.Run(self.op.iallocator, validate=True)
8284
      if not ial.success:
8285
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
8286
                                 errors.ECODE_NORES)
8287
      result = ial.result
8288
    return result
8289

    
8290

    
8291
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_PARAMS = [
    _PInstanceName,
    ("disk", _NoDefault, _TInt),
    ("amount", _NoDefault, _TInt),
    ("wait_for_sync", True, _TBool),
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.", errors.ECODE_INVAL)

    self.disk = instance.FindDisk(self.op.disk)

    if instance.disk_template != constants.DT_FILE:
      # TODO: check the free disk space for file-based disks, once that
      # feature is supported
      _CheckNodesFreeDisk(self, nodenames, self.op.amount)

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk

    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")

    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      result.Raise("Grow request failed to node %s" % node)

      # TODO: Rewrite code to work properly
      # DRBD goes into sync mode for a short amount of time after executing the
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
      # calling "resize" in sync mode fails. Sleeping for a short amount of
      # time is a work-around.
      time.sleep(5)

    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance, feedback_fn)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
      if not instance.admin_up:
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
    elif not instance.admin_up:
      self.proc.LogWarning("Not shutting down the disk even though the"
                           " instance is not supposed to be running, because"
                           " no wait for sync mode was requested.")


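# Illustrative summary of LUGrowDisk.Exec: activate the disk, ask every node
# holding it (primary and secondaries) to grow it by self.op.amount via the
# blockdev_grow RPC, record the new size in the configuration, and optionally
# wait for the resync, shutting the disk down again if the instance is
# administratively down.
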
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_PARAMS = [
    ("instances", _EmptyList, _TListOf(_TNonEmptyString)),
    ("static", False, _TBool),
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = _ExpandInstanceName(self.cfg, name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _ComputeBlockdevStatus(self, node, instance_name, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance_name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

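  # Illustrative note: the tuple returned by _ComputeBlockdevStatus above maps
  # to (dev_path, major, minor, sync_percent, estimated_time, is_degraded,
  # ldisk_status); for a DRBD device that is still syncing it would look
  # roughly like ("/dev/drbd0", 147, 0, 95.2, 120, True, constants.LDS_OKAY).
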
  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance.name, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": _NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

      result[instance.name] = idict

    return result


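# Illustrative note: the mapping returned by LUQueryInstanceData.Exec is keyed
# by instance name; each value is the per-instance dict built above
# (config_state, run_state, pnode, disks, the filled hv/be/os parameters and
# so on), which is what "gnt-instance info" renders.
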
class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_PARAMS = [
    _PInstanceName,
    ("nics", _EmptyList, _TList),
    ("disks", _EmptyList, _TList),
    ("beparams", _EmptyDict, _TDict),
    ("hvparams", _EmptyDict, _TDict),
    ("disk_template", None, _TMaybeString),
    ("remote_node", None, _TMaybeString),
    ("os_name", None, _TMaybeString),
    ("force_variant", False, _TBool),
    ("osparams", None, _TOr(_TDict, _TNone)),
    _PForce,
    ]
  REQ_BGL = False

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      _CheckGlobalHvParams(self.op.hvparams)

    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
        if not isinstance(disk_dict, dict):
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      if disk_op == constants.DDM_ADD:
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                     errors.ECODE_INVAL)
        size = disk_dict.get('size', None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing",
                                     errors.ECODE_INVAL)
        try:
          size = int(size)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err), errors.ECODE_INVAL)
        disk_dict['size'] = size
      else:
        # modification of disk
        if 'size' in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk", errors.ECODE_INVAL)

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time", errors.ECODE_INVAL)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if self.op.disk_template:
      _CheckDiskTemplate(self.op.disk_template)
      if (self.op.disk_template in constants.DTS_NET_MIRROR and
          self.op.remote_node is None):
        raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                   " one requires specifying a secondary node",
                                   errors.ECODE_INVAL)

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
        if not isinstance(nic_dict, dict):
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      # nic_dict should be a dict
      nic_ip = nic_dict.get('ip', None)
      if nic_ip is not None:
        if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict['ip'] = None
        else:
          if not netutils.IsValidIP4(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
                                       errors.ECODE_INVAL)

      nic_bridge = nic_dict.get('bridge', None)
      nic_link = nic_dict.get('link', None)
      if nic_bridge and nic_link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time", errors.ECODE_INVAL)
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
        nic_dict['bridge'] = None
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
        nic_dict['link'] = None

      if nic_op == constants.DDM_ADD:
        nic_mac = nic_dict.get('mac', None)
        if nic_mac is None:
          nic_dict['mac'] = constants.VALUE_AUTO

      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)

        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing nic",
                                     errors.ECODE_INVAL)

    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time", errors.ECODE_INVAL)

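  # Illustrative example (values made up) of the modification lists validated
  # in CheckArguments above: self.op.disks and self.op.nics are lists of
  # (op, params) pairs, where op is constants.DDM_ADD, constants.DDM_REMOVE or
  # an existing index, e.g.
  #   disks=[(constants.DDM_ADD, {"size": 2048, "mode": "rw"})]
  #   nics=[(0, {"ip": "192.0.2.10"})]
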
  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      args['nics'] = []
      nic_override = dict(self.op.nics)
      for idx, nic in enumerate(self.instance.nics):
        if idx in nic_override:
          this_nic_override = nic_override[idx]
        else:
          this_nic_override = {}
        if 'ip' in this_nic_override:
          ip = this_nic_override['ip']
        else:
          ip = nic.ip
        if 'mac' in this_nic_override:
          mac = this_nic_override['mac']
        else:
          mac = nic.mac
        if idx in self.nic_pnew:
          nicparams = self.nic_pnew[idx]
        else:
          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      if constants.DDM_ADD in nic_override:
        ip = nic_override[constants.DDM_ADD].get('ip', None)
        mac = nic_override[constants.DDM_ADD]['mac']
        nicparams = self.nic_pnew[constants.DDM_ADD]
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      elif constants.DDM_REMOVE in nic_override:
        del args['nics'][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # OS change
    if self.op.os_name and not self.op.force:
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                      self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    if self.op.disk_template:
      if instance.disk_template == self.op.disk_template:
        raise errors.OpPrereqError("Instance already has disk template %s" %
                                   instance.disk_template, errors.ECODE_INVAL)

      if (instance.disk_template,
          self.op.disk_template) not in self._DISK_CONVERSIONS:
        raise errors.OpPrereqError("Unsupported disk template conversion from"
                                   " %s to %s" % (instance.disk_template,
                                                  self.op.disk_template),
                                   errors.ECODE_INVAL)
      _CheckInstanceDown(self, instance, "cannot change disk template")
      if self.op.disk_template in constants.DTS_NET_MIRROR:
        if self.op.remote_node == pnode:
          raise errors.OpPrereqError("Given new secondary node %s is the same"
                                     " as the primary node of the instance" %
                                     self.op.remote_node, errors.ECODE_STATE)
        _CheckNodeOnline(self, self.op.remote_node)
        _CheckNodeNotDrained(self, self.op.remote_node)
        disks = [{"size": d.size} for d in instance.disks]
        required = _ComputeDiskSize(self.op.disk_template, disks)
        _CheckNodesFreeDisk(self, [self.op.remote_node], required)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
                                   use_none=True)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}

    # osparams processing
    if self.op.osparams:
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_new = cluster.SimpleFillOS(instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_new = self.os_inst = {}

    self.warn = []

    if constants.BE_MEMORY in self.op.beparams and not self.op.force:
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                         instance.hypervisor)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
        self.warn.append("Node data from primary node %s doesn't contain"
                         " free memory information" % pnode)
      elif instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      else:
        if instance_info.payload:
          current_mem = int(instance_info.payload['memory'])
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    pninfo.payload['memory_free'])
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem,
                                     errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          msg = nres.fail_msg
          if msg:
            self.warn.append("Can't get info from secondary node %s: %s" %
                             (node, msg))
          elif not isinstance(nres.payload.get('memory_free', None), int):
            self.warn.append("Secondary node %s didn't return free"
                             " memory information" % node)
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
            self.warn.append("Not enough memory to failover instance to"
                             " secondary node %s" % node)

    # NIC processing
    self.nic_pnew = {}
    self.nic_pinst = {}
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
                                     errors.ECODE_INVAL)
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if not instance.nics:
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
                                     " no NICs" % nic_op,
                                     errors.ECODE_INVAL)
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics) - 1),
                                     errors.ECODE_INVAL)
        old_nic_params = instance.nics[nic_op].nicparams
        old_nic_ip = instance.nics[nic_op].ip
      else:
        old_nic_params = {}
        old_nic_ip = None

      update_params_dict = dict([(key, nic_dict[key])
                                 for key in constants.NICS_PARAMETERS
                                 if key in nic_dict])

      if 'bridge' in nic_dict:
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']

      new_nic_params = _GetUpdatedParams(old_nic_params,
                                         update_params_dict)
      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
      self.nic_pinst[nic_op] = new_nic_params
      self.nic_pnew[nic_op] = new_filled_nic_params
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]

      if new_nic_mode == constants.NIC_MODE_BRIDGED:
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
        if msg:
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
          if self.op.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
      if new_nic_mode == constants.NIC_MODE_ROUTED:
        if 'ip' in nic_dict:
          nic_ip = nic_dict['ip']
        else:
          nic_ip = old_nic_ip
        if nic_ip is None:
          raise errors.OpPrereqError('Cannot set the nic ip to None'
                                     ' on a routed nic', errors.ECODE_INVAL)
      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac is None:
          raise errors.OpPrereqError('Cannot set the nic mac to None',
                                     errors.ECODE_INVAL)
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          # otherwise generate the mac
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
        else:
          # or validate/reserve the current one
          try:
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
          except errors.ReservationError:
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % nic_mac,
                                       errors.ECODE_NOTUNIQUE)

    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances",
                                 errors.ECODE_INVAL)
    for disk_op, _ in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance", errors.ECODE_INVAL)
        _CheckInstanceDown(self, instance, "cannot remove disks")

      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS,
                                   errors.ECODE_STATE)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks) - 1),
                                     errors.ECODE_INVAL)

    return

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
                                      instance.name, pnode, [snode],
                                      disk_info, None, None, 0)
    info = _GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in new_disks:
      # unfortunately this is... not too nice
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                            info, True)
      for child in disk.children:
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    for disk in new_disks:
      for node in [pnode, snode]:
        f_create = node == pnode
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)

    # at this point, the instance has been modified
    instance.disk_template = constants.DT_DRBD8
    instance.disks = new_disks
    self.cfg.Update(instance, feedback_fn)

    # disks are created, waiting for sync
    disk_abort = not _WaitForSync(self, instance)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please clean up manually")

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    instance = self.instance
    assert len(instance.secondary_nodes) == 1
    pnode = instance.primary_node
    snode = instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = instance.disks
    new_disks = [d.children[0] for d in old_disks]

    # copy over size and mode
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    self.cfg.Update(instance, feedback_fn)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)


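  # Illustrative note on the two conversion helpers above: _ConvertPlainToDrbd
  # reuses the existing LVs as the DRBD data devices (renaming them and
  # creating the missing meta and secondary volumes), while _ConvertDrbdToPlain
  # keeps only the primary node's data LVs and removes the DRBD meta and
  # secondary volumes; both update instance.disk_template in the configuration.
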
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
          if msg:
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))

    if self.op.disk_template:
      r_shut = _ShutdownInstanceDisks(self, instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shut down instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise
      result.append(("disk_template", self.op.disk_template))

    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set, by now
        mac = nic_dict['mac']
        ip = nic_dict.get('ip', None)
        nicparams = self.nic_pinst[constants.DDM_ADD]
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
                       (new_nic.mac, new_nic.ip,
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
                       )))
      else:
        for key in 'mac', 'ip':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
        if nic_op in self.nic_pinst:
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
        for key, val in nic_dict.iteritems():
          result.append(("nic.%s/%d" % (key, nic_op), val))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    self.cfg.Update(instance, feedback_fn)

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


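# Illustrative note: LUSetInstanceParams.Exec returns the applied changes as a
# list of (parameter, new value) pairs, e.g.
# [("disk/1", "add:size=2048,mode=rw"), ("be/memory", 512)], which clients
# print as a change report.
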
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_PARAMS = [
    ("nodes", _EmptyList, _TListOf(_TNonEmptyString)),
    ("use_locking", False, _TBool),
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node in rpcresult:
      if rpcresult[node].fail_msg:
        result[node] = False
      else:
        result[node] = rpcresult[node].payload

    return result


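# Illustrative example of the structure documented in LUQueryExports.Exec:
# {"node1.example.com": ["instance1.example.com"], "node2.example.com": False},
# where False marks a node whose export list could not be retrieved.
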
class LUPrepareExport(NoHooksLU):
  """Prepares an instance for an export and returns useful information.

  """
  _OP_PARAMS = [
    _PInstanceName,
    ("mode", _NoDefault, _TElemOf(constants.EXPORT_MODES)),
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    instance_name = self.op.instance_name

    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self._cds = _GetClusterDomainSecret()

  def Exec(self, feedback_fn):
    """Prepares an instance for an export.

    """
    instance = self.instance

    if self.op.mode == constants.EXPORT_MODE_REMOTE:
      salt = utils.GenerateSecret(8)

      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
      result = self.rpc.call_x509_cert_create(instance.primary_node,
                                              constants.RIE_CERT_VALIDITY)
      result.Raise("Can't create X509 key and certificate on %s" % result.node)

      (name, cert_pem) = result.payload

      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                             cert_pem)

      return {
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
                          salt),
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
        }

    return None


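# Illustrative note: for remote-mode exports the dict returned by
# LUPrepareExport.Exec carries the cluster-domain-secret handshake, an
# HMAC-signed X509 key name and a signed X509 CA; local-mode exports need no
# preparation, hence the None.
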
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_PARAMS = [
    _PInstanceName,
    ("target_node", _NoDefault, _TOr(_TNonEmptyString, _TList)),
    ("shutdown", True, _TBool),
    _PShutdownTimeout,
    ("remove_instance", False, _TBool),
    ("ignore_remove_failures", False, _TBool),
    ("mode", constants.EXPORT_MODE_LOCAL, _TElemOf(constants.EXPORT_MODES)),
    ("x509_key_name", None, _TOr(_TList, _TNone)),
    ("destination_x509_ca", None, _TMaybeString),
    ]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.x509_key_name = self.op.x509_key_name
    self.dest_x509_ca_pem = self.op.destination_x509_ca

    if self.op.remove_instance and not self.op.shutdown:
      raise errors.OpPrereqError("Can not remove instance without shutting it"
                                 " down first")

    if self.op.mode == constants.EXPORT_MODE_REMOTE:
      if not self.x509_key_name:
        raise errors.OpPrereqError("Missing X509 key name for encryption",
                                   errors.ECODE_INVAL)

      if not self.dest_x509_ca_pem:
        raise errors.OpPrereqError("Missing destination X509 CA",
                                   errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    # Lock all nodes for local exports
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      # FIXME: lock only instance primary and destination node
      #
      # Sad but true, for now we have to lock all nodes, as we don't know where
      # the previous export might be, and in this LU we search for it and
      # remove it from its current node. In the future we could fix this by:
      #  - making a tasklet to search (share-lock all), then create the
      #    new one, then one to remove, after
      #  - removing the removal operation altogether
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_MODE": self.op.mode,
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      # TODO: Generic function for boolean env variables
      "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
      }

    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]

    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      nl.append(self.op.target_node)

    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name

    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
      assert self.dst_node is not None

      _CheckNodeOnline(self, self.dst_node.name)
      _CheckNodeNotDrained(self, self.dst_node.name)

      self._cds = None
      self.dest_disk_info = None
      self.dest_x509_ca = None

    elif self.op.mode == constants.EXPORT_MODE_REMOTE:
      self.dst_node = None

      if len(self.op.target_node) != len(self.instance.disks):
        raise errors.OpPrereqError(("Received destination information for %s"
                                    " disks, but instance %s has %s disks") %
                                   (len(self.op.target_node), instance_name,
                                    len(self.instance.disks)),
                                   errors.ECODE_INVAL)

      cds = _GetClusterDomainSecret()

      # Check X509 key name
      try:
        (key_name, hmac_digest, hmac_salt) = self.x509_key_name
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)

      if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
        raise errors.OpPrereqError("HMAC for X509 key name is wrong",
                                   errors.ECODE_INVAL)

      # Load and verify CA
      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
                                   (msg, ), errors.ECODE_INVAL)

      self.dest_x509_ca = cert

      # Verify target information
      disk_info = []
      for idx, disk_data in enumerate(self.op.target_node):
        try:
          (host, port, magic) = \
            masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
        except errors.GenericError, err:
          raise errors.OpPrereqError("Target info for disk %s: %s" %
                                     (idx, err), errors.ECODE_INVAL)

        disk_info.append((host, port, magic))

      assert len(disk_info) == len(self.op.target_node)
      self.dest_disk_info = disk_info

    else:
      raise errors.ProgrammerError("Unhandled export mode %r" %
                                   self.op.mode)

    # instance disk type verification
    # TODO: Implement export support for file-based disks
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks", errors.ECODE_INVAL)

  def _CleanupExports(self, feedback_fn):
    """Removes exports of current instance from all other nodes.

    If an instance in a cluster with nodes A..D was exported to node C, its
    exports will be removed from the nodes A, B and D.

    """
    assert self.op.mode != constants.EXPORT_MODE_REMOTE

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(self.dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    iname = self.instance.name
    if nodelist:
      feedback_fn("Removing old exports for instance %s" % iname)
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].fail_msg:
          continue
        if iname in exportlist[node].payload:
          msg = self.rpc.call_export_remove(node, iname).fail_msg
          if msg:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s: %s", iname, node, msg)

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    assert self.op.mode in constants.EXPORT_MODES

    instance = self.instance
    src_node = instance.primary_node

    if self.op.shutdown:
      # shutdown the instance, but not the disks
      feedback_fn("Shutting down instance %s" % instance.name)
      result = self.rpc.call_instance_shutdown(src_node, instance,
                                               self.op.shutdown_timeout)
      # TODO: Maybe ignore failures if ignore_remove_failures is set
      result.Raise("Could not shut down instance %s on"
                   " node %s" % (instance.name, src_node))

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    activate_disks = (not instance.admin_up)

    if activate_disks:
      # Activate the instance disks if we're exporting a stopped instance
      feedback_fn("Activating disks for %s" % instance.name)
      _StartInstanceDisks(self, instance, None)

    try:
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
                                                     instance)

      helper.CreateSnapshots()
      try:
        if (self.op.shutdown and instance.admin_up and
            not self.op.remove_instance):
          assert not activate_disks
          feedback_fn("Starting instance %s" % instance.name)
          result = self.rpc.call_instance_start(src_node, instance, None, None)
          msg = result.fail_msg
          if msg:
            feedback_fn("Failed to start instance: %s" % msg)
            _ShutdownInstanceDisks(self, instance)
            raise errors.OpExecError("Could not start instance: %s" % msg)

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
          (fin_resu, dresults) = helper.LocalExport(self.dst_node)
        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
          connect_timeout = constants.RIE_CONNECT_TIMEOUT
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          (key_name, _, _) = self.x509_key_name

          dest_ca_pem = \
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                            self.dest_x509_ca)

          (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
                                                     key_name, dest_ca_pem,
                                                     timeouts)
      finally:
        helper.Cleanup()

      # Check for backwards compatibility
      assert len(dresults) == len(instance.disks)
      assert compat.all(isinstance(i, bool) for i in dresults), \
             "Not all results are boolean: %r" % dresults

    finally:
      if activate_disks:
        feedback_fn("Deactivating disks for %s" % instance.name)
        _ShutdownInstanceDisks(self, instance)

    if not (compat.all(dresults) and fin_resu):
      failures = []
      if not fin_resu:
        failures.append("export finalization")
      if not compat.all(dresults):
        fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
                               if not dsk)
        failures.append("disk export: disk(s) %s" % fdsk)

      raise errors.OpExecError("Export failed, errors in %s" %
                               utils.CommaJoin(failures))

    # At this point, the export was successful, we can cleanup/finish

    # Remove instance if requested
    if self.op.remove_instance:
      feedback_fn("Removing instance %s" % instance.name)
      _RemoveInstance(self, feedback_fn, instance,
                      self.op.ignore_remove_failures)

    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      self._CleanupExports(feedback_fn)

    return fin_resu, dresults


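# Illustrative note: LUExportInstance.Exec returns (fin_resu, dresults), the
# finalization status plus one boolean per disk; any failure among them has
# already been turned into an OpExecError above, so a returned value is
# effectively all-successful.
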
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_PARAMS = [
    _PInstanceName,
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = False
    if not instance_name:
      fqdn_warn = True
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      msg = exportlist[node].fail_msg
      if msg:
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
        continue
      if instance_name in exportlist[node].payload:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        msg = result.fail_msg
        if msg:
          logging.error("Could not remove export for instance %s"
                        " on node %s: %s", instance_name, node, msg)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")


class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
9662
  """Generic tags LU.
9663

9664
  This is an abstract class which is the parent of all the other tags LUs.
9665

9666
  """
9667

    
9668
  def ExpandNames(self):
9669
    self.needed_locks = {}
9670
    if self.op.kind == constants.TAG_NODE:
9671
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
9672
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
9673
    elif self.op.kind == constants.TAG_INSTANCE:
9674
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
9675
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
9676

    
9677
  def CheckPrereq(self):
9678
    """Check prerequisites.
9679

9680
    """
9681
    if self.op.kind == constants.TAG_CLUSTER:
9682
      self.target = self.cfg.GetClusterInfo()
9683
    elif self.op.kind == constants.TAG_NODE:
9684
      self.target = self.cfg.GetNodeInfo(self.op.name)
9685
    elif self.op.kind == constants.TAG_INSTANCE:
9686
      self.target = self.cfg.GetInstanceInfo(self.op.name)
9687
    else:
9688
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
9689
                                 str(self.op.kind), errors.ECODE_INVAL)
9690

    
9691

    
9692
class LUGetTags(TagsLU):
9693
  """Returns the tags of a given object.
9694

9695
  """
9696
  _OP_PARAMS = [
9697
    ("kind", _NoDefault, _TElemOf(constants.VALID_TAG_TYPES)),
9698
    ("name", _NoDefault, _TNonEmptyString),
9699
    ]
9700
  REQ_BGL = False
9701

    
9702
  def Exec(self, feedback_fn):
9703
    """Returns the tag list.
9704

9705
    """
9706
    return list(self.target.GetTags())


class LUSearchTags(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  _OP_PARAMS = [
    ("pattern", _NoDefault, _TNonEmptyString),
    ]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err), errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Returns the (path, tag) pairs matching the search pattern.

    """
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results
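    # Example (illustrative) of the returned list for a pattern matching a
    # single instance tag:
    #   [("/instances/inst1.example.com", "web")]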


class LUAddTags(TagsLU):
  """Sets a tag on a given object.

  """
  _OP_PARAMS = [
    ("kind", _NoDefault, _TElemOf(constants.VALID_TAG_TYPES)),
    ("name", _NoDefault, _TNonEmptyString),
    ("tags", _NoDefault, _TListOf(_TNonEmptyString)),
    ]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    self.cfg.Update(self.target, feedback_fn)


class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_PARAMS = [
    ("kind", _NoDefault, _TElemOf(constants.VALID_TAG_TYPES)),
    ("name", _NoDefault, _TNonEmptyString),
    ("tags", _NoDefault, _TListOf(_TNonEmptyString)),
    ]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)), errors.ECODE_NOENT)

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    self.cfg.Update(self.target, feedback_fn)


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_PARAMS = [
    ("duration", _NoDefault, _TFloat),
    ("on_master", True, _TBool),
    ("on_nodes", _EmptyList, _TListOf(_TNonEmptyString)),
    ("repeat", 0, _TPositiveInt)
    ]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def _TestDelay(self):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      for node, node_result in result.items():
        node_result.Raise("Failure during rpc call to node %s" % node)

  def Exec(self, feedback_fn):
    """Execute the test delay opcode, with the wanted repetitions.

    """
    if self.op.repeat == 0:
      self._TestDelay()
    else:
      top_value = self.op.repeat - 1
      for i in range(self.op.repeat):
        self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
        self._TestDelay()
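    # Note (summarizing the behaviour above): repeat=0 still results in a
    # single delay, while repeat=N for N > 0 runs the delay N times, logging
    # the iterations as 0/N-1 up to N-1/N-1.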


class LUTestJobqueue(NoHooksLU):
  """Utility LU to test some aspects of the job queue.

  """
  _OP_PARAMS = [
    ("notify_waitlock", False, _TBool),
    ("notify_exec", False, _TBool),
    ("log_messages", _EmptyList, _TListOf(_TString)),
    ("fail", False, _TBool),
    ]
  REQ_BGL = False

  # Must be lower than default timeout for WaitForJobChange to see whether it
  # notices changed jobs
  _CLIENT_CONNECT_TIMEOUT = 20.0
  _CLIENT_CONFIRM_TIMEOUT = 60.0

  @classmethod
  def _NotifyUsingSocket(cls, cb, errcls):
    """Opens a Unix socket and waits for another program to connect.

    @type cb: callable
    @param cb: Callback to send socket name to client
    @type errcls: class
    @param errcls: Exception class to use for errors

    """
    # Using a temporary directory as there's no easy way to create temporary
    # sockets without writing a custom loop around tempfile.mktemp and
    # socket.bind
    tmpdir = tempfile.mkdtemp()
    try:
      tmpsock = utils.PathJoin(tmpdir, "sock")

      logging.debug("Creating temporary socket at %s", tmpsock)
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
      try:
        sock.bind(tmpsock)
        sock.listen(1)

        # Send details to client
        cb(tmpsock)

        # Wait for client to connect before continuing
        sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
        try:
          (conn, _) = sock.accept()
        except socket.error, err:
          raise errcls("Client didn't connect in time (%s)" % err)
      finally:
        sock.close()
    finally:
      # Remove as soon as client is connected
      shutil.rmtree(tmpdir)

    # Wait for client to close
    try:
      try:
        # pylint: disable-msg=E1101
        # Instance of '_socketobject' has no ... member
        conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
        conn.recv(1)
      except socket.error, err:
        raise errcls("Client failed to confirm notification (%s)" % err)
    finally:
      conn.close()

  def _SendNotification(self, test, arg, sockname):
    """Sends a notification to the client.

    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)
    @type sockname: string
    @param sockname: Socket path

    """
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))

  def _Notify(self, prereq, test, arg):
    """Notifies the client of a test.

    @type prereq: bool
    @param prereq: Whether this is a prereq-phase test
    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)

    """
    if prereq:
      errcls = errors.OpPrereqError
    else:
      errcls = errors.OpExecError

    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
                                                  test, arg),
                                   errcls)

  def CheckArguments(self):
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
    self.expandnames_calls = 0

  def ExpandNames(self):
    checkargs_calls = getattr(self, "checkargs_calls", 0)
    if checkargs_calls < 1:
      raise errors.ProgrammerError("CheckArguments was not called")

    self.expandnames_calls += 1

    if self.op.notify_waitlock:
      self._Notify(True, constants.JQT_EXPANDNAMES, None)

    self.LogInfo("Expanding names")

    # Get lock on master node (just to get a lock, not for a particular reason)
    self.needed_locks = {
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
      }

  def Exec(self, feedback_fn):
    if self.expandnames_calls < 1:
      raise errors.ProgrammerError("ExpandNames was not called")

    if self.op.notify_exec:
      self._Notify(False, constants.JQT_EXEC, None)

    self.LogInfo("Executing")

    if self.op.log_messages:
      for idx, msg in enumerate(self.op.log_messages):
        self.LogInfo("Sending log message %s", idx + 1)
        feedback_fn(constants.JQT_MSGPREFIX + msg)
        # Report how many test messages have been sent
        self._Notify(False, constants.JQT_LOGMSG, idx + 1)

    if self.op.fail:
      raise errors.OpExecError("Opcode failure was requested")

    return True


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, result) for
      easy usage

  """
  # pylint: disable-msg=R0902
  # lots of instance attributes
  _ALLO_KEYS = [
    "name", "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "name", "relocate_from",
    ]
  _EVAC_KEYS = [
    "evac_nodes",
    ]

  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.evac_nodes = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
      fn = self._AddNewInstance
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
      fn = self._AddRelocateInstance
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      keyset = self._EVAC_KEYS
      fn = self._AddEvacuateNodes
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData(fn)

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data
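
    # The resulting structure looks roughly like this (illustrative outline,
    # abridged; node and instance names are examples):
    #   {
    #     "version": constants.IALLOCATOR_VERSION,
    #     "cluster_name": ...,
    #     "cluster_tags": [...],
    #     "enabled_hypervisors": [...],
    #     "nodes": {"node1.example.com": {"total_memory": ..., ...}, ...},
    #     "instances": {"inst1.example.com": {"memory": ..., ...}, ...},
    #   }
    # the "request" key is added later by _BuildInputData().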

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    return request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddEvacuateNodes(self):
    """Add evacuate nodes data to allocator structure.

    """
    request = {
      "evac_nodes": self.evac_nodes
      }
    return request

  def _BuildInputData(self, fn):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["result"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
                               " is not a list")
    self.out_data = rdict
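
    # Example (illustrative) of a well-formed reply for an allocation
    # request; node names are placeholders:
    #   {"success": true, "info": "allocation successful",
    #    "result": ["node2.example.com", "node4.example.com"]}
    # A false "success" means the script could not satisfy the request and
    # "info" carries the reason.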


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_PARAMS = [
    ("direction", _NoDefault, _TElemOf(constants.VALID_IALLOCATOR_DIRECTIONS)),
    ("mode", _NoDefault, _TElemOf(constants.VALID_IALLOCATOR_MODES)),
    ("name", _NoDefault, _TNonEmptyString),
    ("nics", _NoDefault, _TOr(_TNone, _TListOf(
      _TDictOf(_TElemOf(["mac", "ip", "bridge"]),
               _TOr(_TNone, _TNonEmptyString))))),
    ("disks", _NoDefault, _TOr(_TNone, _TList)),
    ("hypervisor", None, _TMaybeString),
    ("allocator", None, _TMaybeString),
    ("tags", _EmptyList, _TListOf(_TNonEmptyString)),
    ("mem_size", None, _TOr(_TNone, _TPositiveInt)),
    ("vcpus", None, _TOr(_TNone, _TPositiveInt)),
    ("os", None, _TMaybeString),
    ("disk_template", None, _TMaybeString),
    ("evac_nodes", None, _TOr(_TNone, _TListOf(_TNonEmptyString))),
    ]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode of
    the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Unhandled mode %s in"
                                   " LUTestAllocator.Exec", self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result