#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201,C0302

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

# C0302: since we have waaaay too many lines in this module

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL
import socket
import tempfile
import shutil
import itertools
import operator

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd
from ganeti import netutils
from ganeti import query
from ganeti import qlang
from ganeti import opcodes
from ganeti import ht

import ganeti.masterd.instance # pylint: disable-msg=W0611


def _SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


class ResultWithJobs:
  """Data container for LU results with jobs.

  Instances of this class returned from L{LogicalUnit.Exec} will be recognized
  by L{mcpu.Processor._ProcessResult}. The latter will then submit the jobs
  contained in the C{jobs} attribute and include the job IDs in the opcode
  result.

  """
  def __init__(self, jobs, **kwargs):
    """Initializes this class.

    Additional return values can be specified as keyword arguments.

    @type jobs: list of lists of L{opcode.OpCode}
    @param jobs: A list of lists of opcode objects

    """
    self.jobs = jobs
    self.other = kwargs


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - implement BuildHooksNodes
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.glm = context.glm
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # logging
    self.Log = processor.Log # pylint: disable-msg=C0103
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    # Validate opcode parameters and set defaults
    self.op.Validate(True)

    self.CheckArguments()

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      pass

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    @rtype: dict
    @return: Dictionary containing the environment that will be used for
      running the hooks for this LU. The keys of the dict must not be prefixed
      with "GANETI_"--that'll be added by the hooks runner. The hooks runner
      will extend the environment with additional variables. If no environment
      should be defined, an empty dictionary should be returned (not C{None}).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def BuildHooksNodes(self):
    """Build list of nodes to run LU's hooks.

    @rtype: tuple; (list, list)
    @return: Tuple containing a list of node names on which the hook
      should run before the execution and a list of node names on which the
      hook should run after the execution. No nodes should be returned as an
      empty list (and not None).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused-argument and 'could
    # be a function' warnings
    # pylint: disable-msg=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    raise AssertionError("BuildHooksEnv called for NoHooksLUs")

  def BuildHooksNodes(self):
    """Empty BuildHooksNodes for NoHooksLU.

    """
    raise AssertionError("BuildHooksNodes called for NoHooksLU")


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    pass

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


class _QueryBase:
  """Base for query utility classes.

  """
  #: Attribute holding field definitions
  FIELDS = None

  def __init__(self, filter_, fields, use_locking):
    """Initializes this class.

    """
    self.use_locking = use_locking

    self.query = query.Query(self.FIELDS, fields, filter_=filter_,
                             namefield="name")
    self.requested_data = self.query.RequestedData()
    self.names = self.query.RequestedNames()

    # Sort only if no names were requested
    self.sort_by_name = not self.names

    self.do_locking = None
    self.wanted = None

  def _GetNames(self, lu, all_names, lock_level):
    """Helper function to determine names asked for in the query.

    """
    if self.do_locking:
      names = lu.glm.list_owned(lock_level)
    else:
      names = all_names

    if self.wanted == locking.ALL_SET:
      assert not self.names
      # caller didn't specify names, so ordering is not important
      return utils.NiceSort(names)

    # caller specified names and we must keep the same order
    assert self.names
    assert not self.do_locking or lu.glm.is_owned(lock_level)

    missing = set(self.wanted).difference(names)
    if missing:
      raise errors.OpExecError("Some items were removed before retrieving"
                               " their data: %s" % missing)

    # Return expanded names
    return self.wanted

  def ExpandNames(self, lu):
    """Expand names for this query.

    See L{LogicalUnit.ExpandNames}.

    """
    raise NotImplementedError()

  def DeclareLocks(self, lu, level):
    """Declare locks for this query.

    See L{LogicalUnit.DeclareLocks}.

    """
    raise NotImplementedError()

  def _GetQueryData(self, lu):
    """Collects all data for this query.

    @return: Query data object

    """
    raise NotImplementedError()

  def NewStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return query.GetQueryResponse(self.query, self._GetQueryData(lu),
                                  sort_by_name=self.sort_by_name)

  def OldStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return self.query.OldStyleQuery(self._GetQueryData(lu),
                                    sort_by_name=self.sort_by_name)


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if nodes:
    return [_ExpandNodeName(lu.cfg, name) for name in nodes]

  return utils.NiceSort(lu.cfg.GetNodeList())


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy


def _ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  assert not (keep is not None and names is not None), \
         "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  if should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in lu.glm.list_owned(level):
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.glm.list_owned(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.glm.release(level, names=release)

    assert frozenset(lu.glm.list_owned(level)) == frozenset(retain)
  else:
    # Release everything
    lu.glm.release(level)

    assert not lu.glm.is_owned(level), "No locks should be owned"


def _MapInstanceDisksToNodes(instances):
  """Creates a map from (node, volume) to instance name.

  @type instances: list of L{objects.Instance}
  @rtype: dict; tuple of (node name, volume name) as key, instance name as value

  """
  return dict(((node, vol), inst.name)
              for inst in instances
              for (node, vols) in inst.MapLVsByNode().items()
              for vol in vols)


def _RunPostHook(lu, node_name):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.hmclass(lu.rpc.call_hooks_runner, lu)
  try:
    hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
  except:
    # pylint: disable-msg=W0702
    lu.LogWarning("Errors occurred running hooks on %s" % node_name)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeVmCapable(lu, node):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
  """Ensure that a node has the given secondary ip.

  @type lu: L{LogicalUnit}
  @param lu: the LU on behalf of which we make the check
  @type node: string
  @param node: the node to check
  @type secondary_ip: string
  @param secondary_ip: the ip to check
  @type prereq: boolean
  @param prereq: whether to throw a prerequisite or an execute error
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False

  """
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
  result.Raise("Failure checking secondary ip on node %s" % node,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
  if not result.payload:
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
           " please fix and re-run this command" % secondary_ip)
    if prereq:
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
    else:
      raise errors.OpExecError(msg)


def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  cluster = lu.cfg.GetClusterInfo()
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    "name": instance.name,
    "primary_node": instance.primary_node,
    "secondary_nodes": instance.secondary_nodes,
    "os_type": instance.os,
    "status": instance.admin_up,
    "memory": bep[constants.BE_MEMORY],
    "vcpus": bep[constants.BE_VCPUS],
    "nics": _NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": [(disk.size, disk.mode) for disk in instance.disks],
    "bep": bep,
    "hvp": hvp,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max with one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


def _CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  variant = objects.OS.GetVariant(name)
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, then the LU's opcode's iallocator slot is filled with the
  cluster-wide default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  iallocator = getattr(lu.op, iallocator_slot, None)

  if node is not None and iallocator is not None:
    raise errors.OpPrereqError("Do not specify both, iallocator and node",
                               errors.ECODE_INVAL)
  elif node is None and iallocator is None:
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found;"
                                 " please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator")


class LUClusterPostInit(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [self.cfg.GetMasterNode()])

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUClusterDestroy(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [])

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()

    # Run post hooks on master node before it's removed
    _RunPostHook(self, master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    return master


def _VerifyCertificate(filename):
  """Verifies a certificate for L{LUClusterVerifyConfig}.

  @type filename: string
  @param filename: Path to PEM file

  """
  try:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(filename))
  except Exception, err: # pylint: disable-msg=W0703
    return (LUClusterVerifyConfig.ETYPE_ERROR,
            "Failed to load X509 certificate %s: %s" % (filename, err))

  (errcode, msg) = \
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
                                constants.SSL_CERT_EXPIRATION_ERROR)

  if msg:
    fnamemsg = "While verifying %s: %s" % (filename, msg)
  else:
    fnamemsg = None

  if errcode is None:
    return (None, fnamemsg)
  elif errcode == utils.CERT_WARNING:
    return (LUClusterVerifyConfig.ETYPE_WARNING, fnamemsg)
  elif errcode == utils.CERT_ERROR:
    return (LUClusterVerifyConfig.ETYPE_ERROR, fnamemsg)

  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)


def _GetAllHypervisorParameters(cluster, instances):
  """Compute the set of all hypervisor parameters.

  @type cluster: L{objects.Cluster}
  @param cluster: the cluster object
  @type instances: list of L{objects.Instance}
  @param instances: additional instances from which to obtain parameters
  @rtype: list of (origin, hypervisor, parameters)
  @return: a list with all parameters found, indicating the hypervisor they
       apply to, and the origin (can be "cluster", "os X", or "instance Y")

  """
  hvp_data = []

  for hv_name in cluster.enabled_hypervisors:
    hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))

  for os_name, os_hvp in cluster.os_hvp.items():
    for hv_name, hv_params in os_hvp.items():
      if hv_params:
        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
        hvp_data.append(("os %s" % os_name, hv_name, full_params))

  # TODO: collapse identical parameter values in a single one
  for instance in instances:
    if instance.hvparams:
      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
                       cluster.FillHV(instance)))

  return hvp_data


class _VerifyErrors(object):
  """Mix-in for cluster/group verify LUs.

  It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
  self.op and self._feedback_fn to be available.)

  """
  TCLUSTER = "cluster"
  TNODE = "node"
  TINSTANCE = "instance"

  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
  ECLUSTERFILECHECK = (TCLUSTER, "ECLUSTERFILECHECK")
  ECLUSTERDANGLINGNODES = (TNODE, "ECLUSTERDANGLINGNODES")
  ECLUSTERDANGLINGINST = (TNODE, "ECLUSTERDANGLINGINST")
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
  EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
  EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
  ENODEDRBD = (TNODE, "ENODEDRBD")
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
  ENODEHV = (TNODE, "ENODEHV")
  ENODELVM = (TNODE, "ENODELVM")
  ENODEN1 = (TNODE, "ENODEN1")
  ENODENET = (TNODE, "ENODENET")
  ENODEOS = (TNODE, "ENODEOS")
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
  ENODERPC = (TNODE, "ENODERPC")
  ENODESSH = (TNODE, "ENODESSH")
  ENODEVERSION = (TNODE, "ENODEVERSION")
  ENODESETUP = (TNODE, "ENODESETUP")
  ENODETIME = (TNODE, "ENODETIME")
  ENODEOOBPATH = (TNODE, "ENODEOOBPATH")

  ETYPE_FIELD = "code"
  ETYPE_ERROR = "ERROR"
  ETYPE_WARNING = "WARNING"

  def _Error(self, ecode, item, msg, *args, **kwargs):
    """Format an error message.

    Based on the opcode's error_codes parameter, either format a
    parseable error code, or a simpler error string.

    This must be called only from Exec and functions called from Exec.

    """
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
    itype, etxt = ecode
    # first complete the msg
    if args:
      msg = msg % args
    # then format the whole message
    if self.op.error_codes: # This is a mix-in. pylint: disable-msg=E1101
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
    else:
      if item:
        item = " " + item
      else:
        item = ""
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
    # and finally report it via the feedback_fn
    self._feedback_fn("  - %s" % msg) # Mix-in. pylint: disable-msg=E1101

  def _ErrorIf(self, cond, *args, **kwargs):
    """Log an error message if the passed condition is True.

    """
    cond = (bool(cond)
            or self.op.debug_simulate_errors) # pylint: disable-msg=E1101
    if cond:
      self._Error(*args, **kwargs)
    # do not mark the operation as failed for WARN cases only
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
      self.bad = self.bad or cond


class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
  """Verifies the cluster config.

  """
  REQ_BGL = True

  def _VerifyHVP(self, hvp_data):
    """Verifies locally the syntax of the hypervisor parameters.

    """
    for item, hv_name, hv_params in hvp_data:
      msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
             (item, hv_name))
      try:
        hv_class = hypervisor.GetHypervisor(hv_name)
        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
        hv_class.CheckParameterSyntax(hv_params)
      except errors.GenericError, err:
        self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))

  def ExpandNames(self):
    # Information can be safely retrieved as the BGL is acquired in exclusive
    # mode
    self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
    self.all_node_info = self.cfg.GetAllNodesInfo()
    self.all_inst_info = self.cfg.GetAllInstancesInfo()
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    self.bad = False
    self._feedback_fn = feedback_fn

    feedback_fn("* Verifying cluster config")

    for msg in self.cfg.VerifyConfig():
      self._ErrorIf(True, self.ECLUSTERCFG, None, msg)

    feedback_fn("* Verifying cluster certificate files")

    for cert_filename in constants.ALL_CERT_FILES:
      (errcode, msg) = _VerifyCertificate(cert_filename)
      self._ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)

    feedback_fn("* Verifying hypervisor parameters")

    self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
                                                self.all_inst_info.values()))

    feedback_fn("* Verifying all nodes belong to an existing group")

    # We do this verification here because, should this bogus circumstance
    # occur, it would never be caught by VerifyGroup, which only acts on
    # nodes/instances reachable from existing node groups.

    dangling_nodes = set(node.name for node in self.all_node_info.values()
                         if node.group not in self.all_group_info)

    dangling_instances = {}
    no_node_instances = []

    for inst in self.all_inst_info.values():
      if inst.primary_node in dangling_nodes:
        dangling_instances.setdefault(inst.primary_node, []).append(inst.name)
      elif inst.primary_node not in self.all_node_info:
        no_node_instances.append(inst.name)

    pretty_dangling = [
        "%s (%s)" %
        (node.name,
         utils.CommaJoin(dangling_instances.get(node.name,
                                                ["no instances"])))
        for node in dangling_nodes]

    self._ErrorIf(bool(dangling_nodes), self.ECLUSTERDANGLINGNODES, None,
                  "the following nodes (and their instances) belong to a non"
                  " existing group: %s", utils.CommaJoin(pretty_dangling))

    self._ErrorIf(bool(no_node_instances), self.ECLUSTERDANGLINGINST, None,
                  "the following instances have a non-existing primary-node:"
                  " %s", utils.CommaJoin(no_node_instances))

    return (not self.bad, [g.name for g in self.all_group_info.values()])


1514
class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
1515
  """Verifies the status of a node group.
1516

1517
  """
1518
  HPATH = "cluster-verify"
1519
  HTYPE = constants.HTYPE_CLUSTER
1520
  REQ_BGL = False
1521

    
1522
  _HOOKS_INDENT_RE = re.compile("^", re.M)
1523

    
1524
  class NodeImage(object):
1525
    """A class representing the logical and physical status of a node.
1526

1527
    @type name: string
1528
    @ivar name: the node name to which this object refers
1529
    @ivar volumes: a structure as returned from
1530
        L{ganeti.backend.GetVolumeList} (runtime)
1531
    @ivar instances: a list of running instances (runtime)
1532
    @ivar pinst: list of configured primary instances (config)
1533
    @ivar sinst: list of configured secondary instances (config)
1534
    @ivar sbp: dictionary of {primary-node: list of instances} for all
1535
        instances for which this node is secondary (config)
1536
    @ivar mfree: free memory, as reported by hypervisor (runtime)
1537
    @ivar dfree: free disk, as reported by the node (runtime)
1538
    @ivar offline: the offline status (config)
1539
    @type rpc_fail: boolean
1540
    @ivar rpc_fail: whether the RPC verify call was successful (overall,
1541
        not whether the individual keys were correct) (runtime)
1542
    @type lvm_fail: boolean
1543
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1544
    @type hyp_fail: boolean
1545
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1546
    @type ghost: boolean
1547
    @ivar ghost: whether this is a known node or not (config)
1548
    @type os_fail: boolean
1549
    @ivar os_fail: whether the RPC call didn't return valid OS data
1550
    @type oslist: list
1551
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1552
    @type vm_capable: boolean
1553
    @ivar vm_capable: whether the node can host instances
1554

1555
    """
1556
    def __init__(self, offline=False, name=None, vm_capable=True):
1557
      self.name = name
1558
      self.volumes = {}
1559
      self.instances = []
1560
      self.pinst = []
1561
      self.sinst = []
1562
      self.sbp = {}
1563
      self.mfree = 0
1564
      self.dfree = 0
1565
      self.offline = offline
1566
      self.vm_capable = vm_capable
1567
      self.rpc_fail = False
1568
      self.lvm_fail = False
1569
      self.hyp_fail = False
1570
      self.ghost = False
1571
      self.os_fail = False
1572
      self.oslist = {}
1573

    
1574
  def ExpandNames(self):
1575
    # This raises errors.OpPrereqError on its own:
1576
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
1577

    
1578
    # Get instances in node group; this is unsafe and needs verification later
1579
    inst_names = self.cfg.GetNodeGroupInstances(self.group_uuid)
1580

    
1581
    self.needed_locks = {
1582
      locking.LEVEL_INSTANCE: inst_names,
1583
      locking.LEVEL_NODEGROUP: [self.group_uuid],
1584
      locking.LEVEL_NODE: [],
1585
      }
1586

    
1587
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1588

    
1589
  def DeclareLocks(self, level):
1590
    if level == locking.LEVEL_NODE:
1591
      # Get members of node group; this is unsafe and needs verification later
1592
      nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
1593

    
1594
      all_inst_info = self.cfg.GetAllInstancesInfo()
1595

    
1596
      # In Exec(), we warn about mirrored instances that have primary and
1597
      # secondary living in separate node groups. To fully verify that
1598
      # volumes for these instances are healthy, we will need to do an
1599
      # extra call to their secondaries. We ensure here those nodes will
1600
      # be locked.
1601
      for inst in self.glm.list_owned(locking.LEVEL_INSTANCE):
1602
        # Important: access only the instances whose lock is owned
1603
        if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
1604
          nodes.update(all_inst_info[inst].secondary_nodes)
1605

    
1606
      self.needed_locks[locking.LEVEL_NODE] = nodes
1607

    
1608
  def CheckPrereq(self):
1609
    group_nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
1610
    group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
1611

    
1612
    unlocked_nodes = \
1613
        group_nodes.difference(self.glm.list_owned(locking.LEVEL_NODE))
1614

    
1615
    unlocked_instances = \
1616
        group_instances.difference(self.glm.list_owned(locking.LEVEL_INSTANCE))
1617

    
1618
    if unlocked_nodes:
1619
      raise errors.OpPrereqError("Missing lock for nodes: %s" %
1620
                                 utils.CommaJoin(unlocked_nodes))
1621

    
1622
    if unlocked_instances:
1623
      raise errors.OpPrereqError("Missing lock for instances: %s" %
1624
                                 utils.CommaJoin(unlocked_instances))
1625

    
1626
    self.all_node_info = self.cfg.GetAllNodesInfo()
1627
    self.all_inst_info = self.cfg.GetAllInstancesInfo()
1628

    
1629
    self.my_node_names = utils.NiceSort(group_nodes)
1630
    self.my_inst_names = utils.NiceSort(group_instances)
1631

    
1632
    self.my_node_info = dict((name, self.all_node_info[name])
1633
                             for name in self.my_node_names)
1634

    
1635
    self.my_inst_info = dict((name, self.all_inst_info[name])
1636
                             for name in self.my_inst_names)
1637

    
1638
    # We detect here the nodes that will need the extra RPC calls for verifying
1639
    # split LV volumes; they should be locked.
1640
    extra_lv_nodes = set()
1641

    
1642
    for inst in self.my_inst_info.values():
1643
      if inst.disk_template in constants.DTS_INT_MIRROR:
1644
        group = self.my_node_info[inst.primary_node].group
1645
        for nname in inst.secondary_nodes:
1646
          if self.all_node_info[nname].group != group:
1647
            extra_lv_nodes.add(nname)
1648

    
1649
    unlocked_lv_nodes = \
1650
        extra_lv_nodes.difference(self.glm.list_owned(locking.LEVEL_NODE))
1651

    
1652
    if unlocked_lv_nodes:
1653
      raise errors.OpPrereqError("these nodes could be locked: %s" %
1654
                                 utils.CommaJoin(unlocked_lv_nodes))
1655
    self.extra_lv_nodes = list(extra_lv_nodes)
1656

    
1657
  def _VerifyNode(self, ninfo, nresult):
1658
    """Perform some basic validation on data returned from a node.
1659

1660
      - check the result data structure is well formed and has all the
1661
        mandatory fields
1662
      - check ganeti version
1663

1664
    @type ninfo: L{objects.Node}
1665
    @param ninfo: the node to check
1666
    @param nresult: the results from the node
1667
    @rtype: boolean
1668
    @return: whether overall this call was successful (and we can expect
1669
         reasonable values in the response)
1670

1671
    """
1672
    node = ninfo.name
1673
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1674

    
1675
    # main result, nresult should be a non-empty dict
1676
    test = not nresult or not isinstance(nresult, dict)
1677
    _ErrorIf(test, self.ENODERPC, node,
1678
                  "unable to verify node: no data returned")
1679
    if test:
1680
      return False
1681

    
1682
    # compares ganeti version
1683
    local_version = constants.PROTOCOL_VERSION
1684
    remote_version = nresult.get("version", None)
1685
    test = not (remote_version and
1686
                isinstance(remote_version, (list, tuple)) and
1687
                len(remote_version) == 2)
1688
    _ErrorIf(test, self.ENODERPC, node,
1689
             "connection to node returned invalid data")
1690
    if test:
1691
      return False
1692

    
1693
    test = local_version != remote_version[0]
1694
    _ErrorIf(test, self.ENODEVERSION, node,
1695
             "incompatible protocol versions: master %s,"
1696
             " node %s", local_version, remote_version[0])
1697
    if test:
1698
      return False
1699

    
1700
    # node seems compatible, we can actually try to look into its results
1701

    
1702
    # full package version
1703
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1704
                  self.ENODEVERSION, node,
1705
                  "software version mismatch: master %s, node %s",
1706
                  constants.RELEASE_VERSION, remote_version[1],
1707
                  code=self.ETYPE_WARNING)
1708

    
1709
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1710
    if ninfo.vm_capable and isinstance(hyp_result, dict):
1711
      for hv_name, hv_result in hyp_result.iteritems():
1712
        test = hv_result is not None
1713
        _ErrorIf(test, self.ENODEHV, node,
1714
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1715

    
1716
    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
1717
    if ninfo.vm_capable and isinstance(hvp_result, list):
1718
      for item, hv_name, hv_result in hvp_result:
1719
        _ErrorIf(True, self.ENODEHV, node,
1720
                 "hypervisor %s parameter verify failure (source %s): %s",
1721
                 hv_name, item, hv_result)
1722

    
1723
    test = nresult.get(constants.NV_NODESETUP,
1724
                       ["Missing NODESETUP results"])
1725
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1726
             "; ".join(test))
1727

    
1728
    return True
1729

    
1730
  def _VerifyNodeTime(self, ninfo, nresult,
1731
                      nvinfo_starttime, nvinfo_endtime):
1732
    """Check the node time.
1733

1734
    @type ninfo: L{objects.Node}
1735
    @param ninfo: the node to check
1736
    @param nresult: the remote results for the node
1737
    @param nvinfo_starttime: the start time of the RPC call
1738
    @param nvinfo_endtime: the end time of the RPC call
1739

1740
    """
1741
    node = ninfo.name
1742
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1743

    
1744
    ntime = nresult.get(constants.NV_TIME, None)
1745
    try:
1746
      ntime_merged = utils.MergeTime(ntime)
1747
    except (ValueError, TypeError):
1748
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1749
      return
1750

    
1751
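    # The node's clock is only flagged as skewed when its merged time falls
    # outside the RPC call window widened by NODE_MAX_CLOCK_SKEW on both sides.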
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1752
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1753
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1754
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1755
    else:
1756
      ntime_diff = None
1757

    
1758
    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1759
             "Node time diverges by at least %s from master node time",
1760
             ntime_diff)
1761

    
1762
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1763
    """Check the node LVM results.
1764

1765
    @type ninfo: L{objects.Node}
1766
    @param ninfo: the node to check
1767
    @param nresult: the remote results for the node
1768
    @param vg_name: the configured VG name
1769

1770
    """
1771
    if vg_name is None:
1772
      return
1773

    
1774
    node = ninfo.name
1775
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1776

    
1777
    # checks vg existence and size > 20G
1778
    vglist = nresult.get(constants.NV_VGLIST, None)
1779
    test = not vglist
1780
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1781
    if not test:
1782
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1783
                                            constants.MIN_VG_SIZE)
1784
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1785

    
1786
    # check pv names
1787
    pvlist = nresult.get(constants.NV_PVLIST, None)
1788
    test = pvlist is None
1789
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1790
    if not test:
1791
      # check that ':' is not present in PV names, since it's a
1792
      # special character for lvcreate (denotes the range of PEs to
1793
      # use on the PV)
1794
      for _, pvname, owner_vg in pvlist:
1795
        test = ":" in pvname
1796
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1797
                 " '%s' of VG '%s'", pvname, owner_vg)
1798

    
1799
  def _VerifyNodeBridges(self, ninfo, nresult, bridges):
1800
    """Check the node bridges.
1801

1802
    @type ninfo: L{objects.Node}
1803
    @param ninfo: the node to check
1804
    @param nresult: the remote results for the node
1805
    @param bridges: the expected list of bridges
1806

1807
    """
1808
    if not bridges:
1809
      return
1810

    
1811
    node = ninfo.name
1812
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1813

    
1814
    missing = nresult.get(constants.NV_BRIDGES, None)
1815
    test = not isinstance(missing, list)
1816
    _ErrorIf(test, self.ENODENET, node,
1817
             "did not return valid bridge information")
1818
    if not test:
1819
      _ErrorIf(bool(missing), self.ENODENET, node, "missing bridges: %s" %
1820
               utils.CommaJoin(sorted(missing)))
1821

    
1822
  def _VerifyNodeNetwork(self, ninfo, nresult):
1823
    """Check the node network connectivity results.
1824

1825
    @type ninfo: L{objects.Node}
1826
    @param ninfo: the node to check
1827
    @param nresult: the remote results for the node
1828

1829
    """
1830
    node = ninfo.name
1831
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1832

    
1833
    test = constants.NV_NODELIST not in nresult
1834
    _ErrorIf(test, self.ENODESSH, node,
1835
             "node hasn't returned node ssh connectivity data")
1836
    if not test:
1837
      if nresult[constants.NV_NODELIST]:
1838
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1839
          _ErrorIf(True, self.ENODESSH, node,
1840
                   "ssh communication with node '%s': %s", a_node, a_msg)
1841

    
1842
    test = constants.NV_NODENETTEST not in nresult
1843
    _ErrorIf(test, self.ENODENET, node,
1844
             "node hasn't returned node tcp connectivity data")
1845
    if not test:
1846
      if nresult[constants.NV_NODENETTEST]:
1847
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1848
        for anode in nlist:
1849
          _ErrorIf(True, self.ENODENET, node,
1850
                   "tcp communication with node '%s': %s",
1851
                   anode, nresult[constants.NV_NODENETTEST][anode])
1852

    
1853
    test = constants.NV_MASTERIP not in nresult
1854
    _ErrorIf(test, self.ENODENET, node,
1855
             "node hasn't returned node master IP reachability data")
1856
    if not test:
1857
      if not nresult[constants.NV_MASTERIP]:
1858
        if node == self.master_node:
1859
          msg = "the master node cannot reach the master IP (not configured?)"
1860
        else:
1861
          msg = "cannot reach the master IP"
1862
        _ErrorIf(True, self.ENODENET, node, msg)
1863

    
1864
  def _VerifyInstance(self, instance, instanceconfig, node_image,
1865
                      diskstatus):
1866
    """Verify an instance.
1867

1868
    This function checks to see if the required block devices are
1869
    available on the instance's node.
1870

1871
    """
1872
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1873
    node_current = instanceconfig.primary_node
1874

    
1875
    node_vol_should = {}
1876
    instanceconfig.MapLVsByNode(node_vol_should)
1877

    
1878
    for node in node_vol_should:
1879
      n_img = node_image[node]
1880
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1881
        # ignore missing volumes on offline or broken nodes
1882
        continue
1883
      for volume in node_vol_should[node]:
1884
        test = volume not in n_img.volumes
1885
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1886
                 "volume %s missing on node %s", volume, node)
1887

    
1888
    if instanceconfig.admin_up:
1889
      pri_img = node_image[node_current]
1890
      test = instance not in pri_img.instances and not pri_img.offline
1891
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1892
               "instance not running on its primary node %s",
1893
               node_current)
1894

    
1895
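    # Flatten the per-node disk status map into (node, success, status, index)
    # tuples so every disk can be checked individually below.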
    diskdata = [(nname, success, status, idx)
1896
                for (nname, disks) in diskstatus.items()
1897
                for idx, (success, status) in enumerate(disks)]
1898

    
1899
    for nname, success, bdev_status, idx in diskdata:
1900
      # the 'ghost node' construction in Exec() ensures that we have a
1901
      # node here
1902
      snode = node_image[nname]
1903
      bad_snode = snode.ghost or snode.offline
1904
      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
1905
               self.EINSTANCEFAULTYDISK, instance,
1906
               "couldn't retrieve status for disk/%s on %s: %s",
1907
               idx, nname, bdev_status)
1908
      _ErrorIf((instanceconfig.admin_up and success and
1909
                bdev_status.ldisk_status == constants.LDS_FAULTY),
1910
               self.EINSTANCEFAULTYDISK, instance,
1911
               "disk/%s on %s is faulty", idx, nname)
1912

    
1913
  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
1914
    """Verify if there are any unknown volumes in the cluster.
1915

1916
    The .os, .swap and backup volumes are ignored. All other volumes are
1917
    reported as unknown.
1918

1919
    @type reserved: L{ganeti.utils.FieldSet}
1920
    @param reserved: a FieldSet of reserved volume names
1921

1922
    """
1923
    for node, n_img in node_image.items():
1924
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1925
        # skip non-healthy nodes
1926
        continue
1927
      for volume in n_img.volumes:
1928
        test = ((node not in node_vol_should or
1929
                volume not in node_vol_should[node]) and
1930
                not reserved.Matches(volume))
1931
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1932
                      "volume %s is unknown", volume)
1933

    
1934
  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1935
    """Verify N+1 Memory Resilience.
1936

1937
    Check that if one single node dies we can still start all the
1938
    instances it was primary for.
1939

1940
    """
1941
    cluster_info = self.cfg.GetClusterInfo()
1942
    for node, n_img in node_image.items():
1943
      # This code checks that every node which is now listed as
1944
      # secondary has enough memory to host all instances it is
1945
      # supposed to, should a single other node in the cluster fail.
1946
      # FIXME: not ready for failover to an arbitrary node
1947
      # FIXME: does not support file-backed instances
1948
      # WARNING: we currently take into account down instances as well
1949
      # as up ones, considering that even if they're down someone
1950
      # might want to start them even in the event of a node failure.
1951
      if n_img.offline:
1952
        # we're skipping offline nodes from the N+1 warning, since
1953
        # most likely we don't have good memory information from them;
1954
        # we already list instances living on such nodes, and that's
1955
        # enough warning
1956
        continue
1957
      for prinode, instances in n_img.sbp.items():
1958
        needed_mem = 0
1959
        for instance in instances:
1960
          bep = cluster_info.FillBE(instance_cfg[instance])
1961
          if bep[constants.BE_AUTO_BALANCE]:
1962
            needed_mem += bep[constants.BE_MEMORY]
1963
        test = n_img.mfree < needed_mem
1964
        self._ErrorIf(test, self.ENODEN1, node,
1965
                      "not enough memory to accomodate instance failovers"
1966
                      " should node %s fail (%dMiB needed, %dMiB available)",
1967
                      prinode, needed_mem, n_img.mfree)
1968

    
1969
  @classmethod
1970
  def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
1971
                   (files_all, files_all_opt, files_mc, files_vm)):
1972
    """Verifies file checksums collected from all nodes.
1973

1974
    @param errorif: Callback for reporting errors
1975
    @param nodeinfo: List of L{objects.Node} objects
1976
    @param master_node: Name of master node
1977
    @param all_nvinfo: RPC results
1978

1979
    """
1980
    node_names = frozenset(node.name for node in nodeinfo)
1981

    
1982
    assert master_node in node_names
1983
    assert (len(files_all | files_all_opt | files_mc | files_vm) ==
1984
            sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
1985
           "Found file listed in more than one file list"
1986

    
1987
    # Define functions determining which nodes to consider for a file
1988
    file2nodefn = dict([(filename, fn)
1989
      for (files, fn) in [(files_all, None),
1990
                          (files_all_opt, None),
1991
                          (files_mc, lambda node: (node.master_candidate or
1992
                                                   node.name == master_node)),
1993
                          (files_vm, lambda node: node.vm_capable)]
1994
      for filename in files])
1995

    
1996
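    # fileinfo maps each filename to {checksum: set of node names reporting
    # it}; it is filled below and later used to spot missing files and
    # checksum variants across nodes.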
    fileinfo = dict((filename, {}) for filename in file2nodefn.keys())
1997

    
1998
    for node in nodeinfo:
1999
      nresult = all_nvinfo[node.name]
2000

    
2001
      if nresult.fail_msg or not nresult.payload:
2002
        node_files = None
2003
      else:
2004
        node_files = nresult.payload.get(constants.NV_FILELIST, None)
2005

    
2006
      test = not (node_files and isinstance(node_files, dict))
2007
      errorif(test, cls.ENODEFILECHECK, node.name,
2008
              "Node did not return file checksum data")
2009
      if test:
2010
        continue
2011

    
2012
      for (filename, checksum) in node_files.items():
2013
        # Check if the file should be considered for a node
2014
        fn = file2nodefn[filename]
2015
        if fn is None or fn(node):
2016
          fileinfo[filename].setdefault(checksum, set()).add(node.name)
2017

    
2018
    for (filename, checksums) in fileinfo.items():
2019
      assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
2020

    
2021
      # Nodes having the file
2022
      with_file = frozenset(node_name
2023
                            for nodes in fileinfo[filename].values()
2024
                            for node_name in nodes)
2025

    
2026
      # Nodes missing file
2027
      missing_file = node_names - with_file
2028

    
2029
      if filename in files_all_opt:
2030
        # All or no nodes
2031
        errorif(missing_file and missing_file != node_names,
2032
                cls.ECLUSTERFILECHECK, None,
2033
                "File %s is optional, but it must exist on all or no"
2034
                " nodes (not found on %s)",
2035
                filename, utils.CommaJoin(utils.NiceSort(missing_file)))
2036
      else:
2037
        errorif(missing_file, cls.ECLUSTERFILECHECK, None,
2038
                "File %s is missing from node(s) %s", filename,
2039
                utils.CommaJoin(utils.NiceSort(missing_file)))
2040

    
2041
      # See if there are multiple versions of the file
2042
      test = len(checksums) > 1
2043
      if test:
2044
        variants = ["variant %s on %s" %
2045
                    (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
2046
                    for (idx, (checksum, nodes)) in
2047
                      enumerate(sorted(checksums.items()))]
2048
      else:
2049
        variants = []
2050

    
2051
      errorif(test, cls.ECLUSTERFILECHECK, None,
2052
              "File %s found with %s different checksums (%s)",
2053
              filename, len(checksums), "; ".join(variants))
2054

    
2055
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
2056
                      drbd_map):
2057
    """Verifies and the node DRBD status.
2058

2059
    @type ninfo: L{objects.Node}
2060
    @param ninfo: the node to check
2061
    @param nresult: the remote results for the node
2062
    @param instanceinfo: the dict of instances
2063
    @param drbd_helper: the configured DRBD usermode helper
2064
    @param drbd_map: the DRBD map as returned by
2065
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
2066

2067
    """
2068
    node = ninfo.name
2069
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2070

    
2071
    if drbd_helper:
2072
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
2073
      test = (helper_result is None)
2074
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
2075
               "no drbd usermode helper returned")
2076
      if helper_result:
2077
        status, payload = helper_result
2078
        test = not status
2079
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
2080
                 "drbd usermode helper check unsuccessful: %s", payload)
2081
        test = status and (payload != drbd_helper)
2082
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
2083
                 "wrong drbd usermode helper: %s", payload)
2084

    
2085
    # compute the DRBD minors
2086
    node_drbd = {}
2087
    for minor, instance in drbd_map[node].items():
2088
      test = instance not in instanceinfo
2089
      _ErrorIf(test, self.ECLUSTERCFG, None,
2090
               "ghost instance '%s' in temporary DRBD map", instance)
2091
      # ghost instance should not be running, but otherwise we
      # don't give double warnings (both ghost instance and
      # unallocated minor in use)
2094
      if test:
2095
        node_drbd[minor] = (instance, False)
2096
      else:
2097
        instance = instanceinfo[instance]
2098
        node_drbd[minor] = (instance.name, instance.admin_up)
2099

    
2100
    # and now check them
2101
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
2102
    test = not isinstance(used_minors, (tuple, list))
2103
    _ErrorIf(test, self.ENODEDRBD, node,
2104
             "cannot parse drbd status file: %s", str(used_minors))
2105
    if test:
2106
      # we cannot check drbd status
2107
      return
2108

    
2109
    for minor, (iname, must_exist) in node_drbd.items():
2110
      test = minor not in used_minors and must_exist
2111
      _ErrorIf(test, self.ENODEDRBD, node,
2112
               "drbd minor %d of instance %s is not active", minor, iname)
2113
    for minor in used_minors:
2114
      test = minor not in node_drbd
2115
      _ErrorIf(test, self.ENODEDRBD, node,
2116
               "unallocated drbd minor %d is in use", minor)
2117

    
2118
  def _UpdateNodeOS(self, ninfo, nresult, nimg):
2119
    """Builds the node OS structures.
2120

2121
    @type ninfo: L{objects.Node}
2122
    @param ninfo: the node to check
2123
    @param nresult: the remote results for the node
2124
    @param nimg: the node image object
2125

2126
    """
2127
    node = ninfo.name
2128
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2129

    
2130
    remote_os = nresult.get(constants.NV_OSLIST, None)
2131
    test = (not isinstance(remote_os, list) or
2132
            not compat.all(isinstance(v, list) and len(v) == 7
2133
                           for v in remote_os))
2134

    
2135
    _ErrorIf(test, self.ENODEOS, node,
2136
             "node hasn't returned valid OS data")
2137

    
2138
    nimg.os_fail = test
2139

    
2140
    if test:
2141
      return
2142

    
2143
    os_dict = {}
2144

    
2145
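    # Group the flat OS list by name; each entry keeps the path, validity
    # status, diagnose message and the variant/parameter/API version sets.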
    for (name, os_path, status, diagnose,
2146
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
2147

    
2148
      if name not in os_dict:
2149
        os_dict[name] = []
2150

    
2151
      # parameters is a list of lists instead of list of tuples due to
2152
      # JSON lacking a real tuple type, fix it:
2153
      parameters = [tuple(v) for v in parameters]
2154
      os_dict[name].append((os_path, status, diagnose,
2155
                            set(variants), set(parameters), set(api_ver)))
2156

    
2157
    nimg.oslist = os_dict
2158

    
2159
  def _VerifyNodeOS(self, ninfo, nimg, base):
2160
    """Verifies the node OS list.
2161

2162
    @type ninfo: L{objects.Node}
2163
    @param ninfo: the node to check
2164
    @param nimg: the node image object
2165
    @param base: the 'template' node we match against (e.g. from the master)
2166

2167
    """
2168
    node = ninfo.name
2169
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2170

    
2171
    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
2172

    
2173
    beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
2174
    for os_name, os_data in nimg.oslist.items():
2175
      assert os_data, "Empty OS status for OS %s?!" % os_name
2176
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
2177
      _ErrorIf(not f_status, self.ENODEOS, node,
2178
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
2179
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
2180
               "OS '%s' has multiple entries (first one shadows the rest): %s",
2181
               os_name, utils.CommaJoin([v[0] for v in os_data]))
2182
      # this will be caught in the backend too
2183
      _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
2184
               and not f_var, self.ENODEOS, node,
2185
               "OS %s with API at least %d does not declare any variant",
2186
               os_name, constants.OS_API_V15)
2187
      # comparisons with the 'base' image
2188
      test = os_name not in base.oslist
2189
      _ErrorIf(test, self.ENODEOS, node,
2190
               "Extra OS %s not present on reference node (%s)",
2191
               os_name, base.name)
2192
      if test:
2193
        continue
2194
      assert base.oslist[os_name], "Base node has empty OS status?"
2195
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
2196
      if not b_status:
2197
        # base OS is invalid, skipping
2198
        continue
2199
      for kind, a, b in [("API version", f_api, b_api),
2200
                         ("variants list", f_var, b_var),
2201
                         ("parameters", beautify_params(f_param),
2202
                          beautify_params(b_param))]:
2203
        _ErrorIf(a != b, self.ENODEOS, node,
2204
                 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
2205
                 kind, os_name, base.name,
2206
                 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
2207

    
2208
    # check any missing OSes
2209
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
2210
    _ErrorIf(missing, self.ENODEOS, node,
2211
             "OSes present on reference node %s but missing on this node: %s",
2212
             base.name, utils.CommaJoin(missing))
2213

    
2214
  def _VerifyOob(self, ninfo, nresult):
2215
    """Verifies out of band functionality of a node.
2216

2217
    @type ninfo: L{objects.Node}
2218
    @param ninfo: the node to check
2219
    @param nresult: the remote results for the node
2220

2221
    """
2222
    node = ninfo.name
2223
    # We just have to verify the paths on master and/or master candidates
2224
    # as the oob helper is invoked on the master
2225
    if ((ninfo.master_candidate or ninfo.master_capable) and
2226
        constants.NV_OOB_PATHS in nresult):
2227
      for path_result in nresult[constants.NV_OOB_PATHS]:
2228
        self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)
2229

    
2230
  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
2231
    """Verifies and updates the node volume data.
2232

2233
    This function will update a L{NodeImage}'s internal structures
2234
    with data from the remote call.
2235

2236
    @type ninfo: L{objects.Node}
2237
    @param ninfo: the node to check
2238
    @param nresult: the remote results for the node
2239
    @param nimg: the node image object
2240
    @param vg_name: the configured VG name
2241

2242
    """
2243
    node = ninfo.name
2244
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2245

    
2246
    nimg.lvm_fail = True
2247
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
2248
    if vg_name is None:
2249
      pass
2250
    elif isinstance(lvdata, basestring):
2251
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
2252
               utils.SafeEncode(lvdata))
2253
    elif not isinstance(lvdata, dict):
2254
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
2255
    else:
2256
      nimg.volumes = lvdata
2257
      nimg.lvm_fail = False
2258

    
2259
  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
2260
    """Verifies and updates the node instance list.
2261

2262
    If the listing was successful, then updates this node's instance
2263
    list. Otherwise, it marks the RPC call as failed for the instance
2264
    list key.
2265

2266
    @type ninfo: L{objects.Node}
2267
    @param ninfo: the node to check
2268
    @param nresult: the remote results for the node
2269
    @param nimg: the node image object
2270

2271
    """
2272
    idata = nresult.get(constants.NV_INSTANCELIST, None)
2273
    test = not isinstance(idata, list)
2274
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
2275
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
2276
    if test:
2277
      nimg.hyp_fail = True
2278
    else:
2279
      nimg.instances = idata
2280

    
2281
  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
2282
    """Verifies and computes a node information map
2283

2284
    @type ninfo: L{objects.Node}
2285
    @param ninfo: the node to check
2286
    @param nresult: the remote results for the node
2287
    @param nimg: the node image object
2288
    @param vg_name: the configured VG name
2289

2290
    """
2291
    node = ninfo.name
2292
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2293

    
2294
    # try to read free memory (from the hypervisor)
2295
    hv_info = nresult.get(constants.NV_HVINFO, None)
2296
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
2297
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
2298
    if not test:
2299
      try:
2300
        nimg.mfree = int(hv_info["memory_free"])
2301
      except (ValueError, TypeError):
2302
        _ErrorIf(True, self.ENODERPC, node,
2303
                 "node returned invalid nodeinfo, check hypervisor")
2304

    
2305
    # FIXME: devise a free space model for file based instances as well
2306
    if vg_name is not None:
2307
      test = (constants.NV_VGLIST not in nresult or
2308
              vg_name not in nresult[constants.NV_VGLIST])
2309
      _ErrorIf(test, self.ENODELVM, node,
2310
               "node didn't return data for the volume group '%s'"
2311
               " - it is either missing or broken", vg_name)
2312
      if not test:
2313
        try:
2314
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
2315
        except (ValueError, TypeError):
2316
          _ErrorIf(True, self.ENODERPC, node,
2317
                   "node returned invalid LVM info, check LVM status")
2318

    
2319
  def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
2320
    """Gets per-disk status information for all instances.
2321

2322
    @type nodelist: list of strings
2323
    @param nodelist: Node names
2324
    @type node_image: dict of (name, L{objects.Node})
2325
    @param node_image: Node objects
2326
    @type instanceinfo: dict of (name, L{objects.Instance})
2327
    @param instanceinfo: Instance objects
2328
    @rtype: {instance: {node: [(success, payload)]}}
2329
    @return: a dictionary of per-instance dictionaries with nodes as
2330
        keys and disk information as values; the disk information is a
2331
        list of tuples (success, payload)
2332

2333
    """
2334
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2335

    
2336
    node_disks = {}
2337
    node_disks_devonly = {}
2338
    diskless_instances = set()
2339
    diskless = constants.DT_DISKLESS
2340

    
2341
    for nname in nodelist:
2342
      node_instances = list(itertools.chain(node_image[nname].pinst,
2343
                                            node_image[nname].sinst))
2344
      diskless_instances.update(inst for inst in node_instances
2345
                                if instanceinfo[inst].disk_template == diskless)
2346
      disks = [(inst, disk)
2347
               for inst in node_instances
2348
               for disk in instanceinfo[inst].disks]
2349

    
2350
      if not disks:
2351
        # No need to collect data
2352
        continue
2353

    
2354
      node_disks[nname] = disks
2355

    
2356
      # Creating copies as SetDiskID below will modify the objects and that can
2357
      # lead to incorrect data returned from nodes
2358
      devonly = [dev.Copy() for (_, dev) in disks]
2359

    
2360
      for dev in devonly:
2361
        self.cfg.SetDiskID(dev, nname)
2362

    
2363
      node_disks_devonly[nname] = devonly
2364

    
2365
    assert len(node_disks) == len(node_disks_devonly)
2366

    
2367
    # Collect data from all nodes with disks
2368
    result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2369
                                                          node_disks_devonly)
2370

    
2371
    assert len(result) == len(node_disks)
2372

    
2373
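    # instdisk: {instance: {node: [(success, payload), ...]}}, assembled from
    # the per-node RPC results in the same order the disks were submitted.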
    instdisk = {}
2374

    
2375
    for (nname, nres) in result.items():
2376
      disks = node_disks[nname]
2377

    
2378
      if nres.offline:
2379
        # No data from this node
2380
        data = len(disks) * [(False, "node offline")]
2381
      else:
2382
        msg = nres.fail_msg
2383
        _ErrorIf(msg, self.ENODERPC, nname,
2384
                 "while getting disk information: %s", msg)
2385
        if msg:
2386
          # No data from this node
2387
          data = len(disks) * [(False, msg)]
2388
        else:
2389
          data = []
2390
          for idx, i in enumerate(nres.payload):
2391
            if isinstance(i, (tuple, list)) and len(i) == 2:
2392
              data.append(i)
2393
            else:
2394
              logging.warning("Invalid result from node %s, entry %d: %s",
2395
                              nname, idx, i)
2396
              data.append((False, "Invalid result from the remote node"))
2397

    
2398
      for ((inst, _), status) in zip(disks, data):
2399
        instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2400

    
2401
    # Add empty entries for diskless instances.
2402
    for inst in diskless_instances:
2403
      assert inst not in instdisk
2404
      instdisk[inst] = {}
2405

    
2406
    assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2407
                      len(nnames) <= len(instanceinfo[inst].all_nodes) and
2408
                      compat.all(isinstance(s, (tuple, list)) and
2409
                                 len(s) == 2 for s in statuses)
2410
                      for inst, nnames in instdisk.items()
2411
                      for nname, statuses in nnames.items())
2412
    assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
2413

    
2414
    return instdisk
2415

    
2416
  def BuildHooksEnv(self):
2417
    """Build hooks env.
2418

2419
    Cluster-Verify hooks are only run in the post phase; their failure causes
    the output to be logged in the verify output and makes the verification
    fail.
2421

2422
    """
2423
    env = {
2424
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
2425
      }
2426

    
2427
    env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
2428
               for node in self.my_node_info.values())
2429

    
2430
    return env
2431

    
2432
  def BuildHooksNodes(self):
2433
    """Build hooks nodes.
2434

2435
    """
2436
    return ([], self.my_node_names)
2437

    
2438
  def Exec(self, feedback_fn):
2439
    """Verify integrity of the node group, performing various test on nodes.
2440

2441
    """
2442
    # This method has too many local variables. pylint: disable-msg=R0914
2443

    
2444
    if not self.my_node_names:
2445
      # empty node group
2446
      feedback_fn("* Empty node group, skipping verification")
2447
      return True
2448

    
2449
    self.bad = False
2450
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2451
    verbose = self.op.verbose
2452
    self._feedback_fn = feedback_fn
2453

    
2454
    vg_name = self.cfg.GetVGName()
2455
    drbd_helper = self.cfg.GetDRBDHelper()
2456
    cluster = self.cfg.GetClusterInfo()
2457
    groupinfo = self.cfg.GetAllNodeGroupsInfo()
2458
    hypervisors = cluster.enabled_hypervisors
2459
    node_data_list = [self.my_node_info[name] for name in self.my_node_names]
2460

    
2461
    i_non_redundant = [] # Non redundant instances
2462
    i_non_a_balanced = [] # Non auto-balanced instances
2463
    n_offline = 0 # Count of offline nodes
2464
    n_drained = 0 # Count of nodes being drained
2465
    node_vol_should = {}
2466

    
2467
    # FIXME: verify OS list
2468

    
2469
    # File verification
2470
    filemap = _ComputeAncillaryFiles(cluster, False)
2471

    
2472
    # do local checksums
2473
    master_node = self.master_node = self.cfg.GetMasterNode()
2474
    master_ip = self.cfg.GetMasterIP()
2475

    
2476
    feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))
2477

    
2478
    # We will make nodes contact all nodes in their group, and one node from
2479
    # every other group.
2480
    # TODO: should it be a *random* node, different every time?
2481
    online_nodes = [node.name for node in node_data_list if not node.offline]
2482
    other_group_nodes = {}
2483

    
2484
    for name in sorted(self.all_node_info):
2485
      node = self.all_node_info[name]
2486
      if (node.group not in other_group_nodes
2487
          and node.group != self.group_uuid
2488
          and not node.offline):
2489
        other_group_nodes[node.group] = node.name
2490

    
2491
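    # Request map for the node_verify RPC: every NV_* key selects one check
    # the remote nodes should run and report back on.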
    node_verify_param = {
2492
      constants.NV_FILELIST:
2493
        utils.UniqueSequence(filename
2494
                             for files in filemap
2495
                             for filename in files),
2496
      constants.NV_NODELIST: online_nodes + other_group_nodes.values(),
2497
      constants.NV_HYPERVISOR: hypervisors,
2498
      constants.NV_HVPARAMS:
2499
        _GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
2500
      constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip)
2501
                                 for node in node_data_list
2502
                                 if not node.offline],
2503
      constants.NV_INSTANCELIST: hypervisors,
2504
      constants.NV_VERSION: None,
2505
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2506
      constants.NV_NODESETUP: None,
2507
      constants.NV_TIME: None,
2508
      constants.NV_MASTERIP: (master_node, master_ip),
2509
      constants.NV_OSLIST: None,
2510
      constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
2511
      }
2512

    
2513
    if vg_name is not None:
2514
      node_verify_param[constants.NV_VGLIST] = None
2515
      node_verify_param[constants.NV_LVLIST] = vg_name
2516
      node_verify_param[constants.NV_PVLIST] = [vg_name]
2517
      node_verify_param[constants.NV_DRBDLIST] = None
2518

    
2519
    if drbd_helper:
2520
      node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
2521

    
2522
    # bridge checks
2523
    # FIXME: this needs to be changed per node-group, not cluster-wide
2524
    bridges = set()
2525
    default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
2526
    if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2527
      bridges.add(default_nicpp[constants.NIC_LINK])
2528
    for instance in self.my_inst_info.values():
2529
      for nic in instance.nics:
2530
        full_nic = cluster.SimpleFillNIC(nic.nicparams)
2531
        if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2532
          bridges.add(full_nic[constants.NIC_LINK])
2533

    
2534
    if bridges:
2535
      node_verify_param[constants.NV_BRIDGES] = list(bridges)
2536

    
2537
    # Build our expected cluster state
2538
    node_image = dict((node.name, self.NodeImage(offline=node.offline,
2539
                                                 name=node.name,
2540
                                                 vm_capable=node.vm_capable))
2541
                      for node in node_data_list)
2542

    
2543
    # Gather OOB paths
2544
    oob_paths = []
2545
    for node in self.all_node_info.values():
2546
      path = _SupportsOob(self.cfg, node)
2547
      if path and path not in oob_paths:
2548
        oob_paths.append(path)
2549

    
2550
    if oob_paths:
2551
      node_verify_param[constants.NV_OOB_PATHS] = oob_paths
2552

    
2553
    for instance in self.my_inst_names:
2554
      inst_config = self.my_inst_info[instance]
2555

    
2556
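      # Nodes referenced by an instance but not part of this group get a
      # placeholder NodeImage; truly unknown nodes are marked as ghosts.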
      for nname in inst_config.all_nodes:
2557
        if nname not in node_image:
2558
          gnode = self.NodeImage(name=nname)
2559
          gnode.ghost = (nname not in self.all_node_info)
2560
          node_image[nname] = gnode
2561

    
2562
      inst_config.MapLVsByNode(node_vol_should)
2563

    
2564
      pnode = inst_config.primary_node
2565
      node_image[pnode].pinst.append(instance)
2566

    
2567
      for snode in inst_config.secondary_nodes:
2568
        nimg = node_image[snode]
2569
        nimg.sinst.append(instance)
2570
        if pnode not in nimg.sbp:
2571
          nimg.sbp[pnode] = []
2572
        nimg.sbp[pnode].append(instance)
2573

    
2574
    # At this point, we have the in-memory data structures complete,
2575
    # except for the runtime information, which we'll gather next
2576

    
2577
    # Due to the way our RPC system works, exact response times cannot be
2578
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2579
    # time before and after executing the request, we can at least have a time
2580
    # window.
2581
    nvinfo_starttime = time.time()
2582
    all_nvinfo = self.rpc.call_node_verify(self.my_node_names,
2583
                                           node_verify_param,
2584
                                           self.cfg.GetClusterName())
2585
    nvinfo_endtime = time.time()
2586

    
2587
    if self.extra_lv_nodes and vg_name is not None:
2588
      extra_lv_nvinfo = \
2589
          self.rpc.call_node_verify(self.extra_lv_nodes,
2590
                                    {constants.NV_LVLIST: vg_name},
2591
                                    self.cfg.GetClusterName())
2592
    else:
2593
      extra_lv_nvinfo = {}
2594

    
2595
    all_drbd_map = self.cfg.ComputeDRBDMap()
2596

    
2597
    feedback_fn("* Gathering disk information (%s nodes)" %
2598
                len(self.my_node_names))
2599
    instdisk = self._CollectDiskInfo(self.my_node_names, node_image,
2600
                                     self.my_inst_info)
2601

    
2602
    feedback_fn("* Verifying configuration file consistency")
2603

    
2604
    # If not all nodes are being checked, we need to make sure the master node
2605
    # and a non-checked vm_capable node are in the list.
2606
    absent_nodes = set(self.all_node_info).difference(self.my_node_info)
2607
    if absent_nodes:
2608
      vf_nvinfo = all_nvinfo.copy()
2609
      vf_node_info = list(self.my_node_info.values())
2610
      additional_nodes = []
2611
      if master_node not in self.my_node_info:
2612
        additional_nodes.append(master_node)
2613
        vf_node_info.append(self.all_node_info[master_node])
2614
      # Add the first vm_capable node we find which is not included
2615
      for node in absent_nodes:
2616
        nodeinfo = self.all_node_info[node]
2617
        if nodeinfo.vm_capable and not nodeinfo.offline:
2618
          additional_nodes.append(node)
2619
          vf_node_info.append(self.all_node_info[node])
2620
          break
2621
      key = constants.NV_FILELIST
2622
      vf_nvinfo.update(self.rpc.call_node_verify(additional_nodes,
2623
                                                 {key: node_verify_param[key]},
2624
                                                 self.cfg.GetClusterName()))
2625
    else:
2626
      vf_nvinfo = all_nvinfo
2627
      vf_node_info = self.my_node_info.values()
2628

    
2629
    self._VerifyFiles(_ErrorIf, vf_node_info, master_node, vf_nvinfo, filemap)
2630

    
2631
    feedback_fn("* Verifying node status")
2632

    
2633
    refos_img = None
2634

    
2635
    for node_i in node_data_list:
2636
      node = node_i.name
2637
      nimg = node_image[node]
2638

    
2639
      if node_i.offline:
2640
        if verbose:
2641
          feedback_fn("* Skipping offline node %s" % (node,))
2642
        n_offline += 1
2643
        continue
2644

    
2645
      if node == master_node:
2646
        ntype = "master"
2647
      elif node_i.master_candidate:
2648
        ntype = "master candidate"
2649
      elif node_i.drained:
2650
        ntype = "drained"
2651
        n_drained += 1
2652
      else:
2653
        ntype = "regular"
2654
      if verbose:
2655
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2656

    
2657
      msg = all_nvinfo[node].fail_msg
2658
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2659
      if msg:
2660
        nimg.rpc_fail = True
2661
        continue
2662

    
2663
      nresult = all_nvinfo[node].payload
2664

    
2665
      nimg.call_ok = self._VerifyNode(node_i, nresult)
2666
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2667
      self._VerifyNodeNetwork(node_i, nresult)
2668
      self._VerifyOob(node_i, nresult)
2669

    
2670
      if nimg.vm_capable:
2671
        self._VerifyNodeLVM(node_i, nresult, vg_name)
2672
        self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
2673
                             all_drbd_map)
2674

    
2675
        self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2676
        self._UpdateNodeInstances(node_i, nresult, nimg)
2677
        self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2678
        self._UpdateNodeOS(node_i, nresult, nimg)
2679

    
2680
        if not nimg.os_fail:
2681
          if refos_img is None:
2682
            refos_img = nimg
2683
          self._VerifyNodeOS(node_i, nimg, refos_img)
2684
        self._VerifyNodeBridges(node_i, nresult, bridges)
2685

    
2686
        # Check whether all running instances are primary for the node. (This
2687
        # can no longer be done from _VerifyInstance below, since some of the
2688
        # wrong instances could be from other node groups.)
2689
        non_primary_inst = set(nimg.instances).difference(nimg.pinst)
2690

    
2691
        for inst in non_primary_inst:
2692
          test = inst in self.all_inst_info
2693
          _ErrorIf(test, self.EINSTANCEWRONGNODE, inst,
2694
                   "instance should not run on node %s", node_i.name)
2695
          _ErrorIf(not test, self.ENODEORPHANINSTANCE, node_i.name,
2696
                   "node is running unknown instance %s", inst)
2697

    
2698
    for node, result in extra_lv_nvinfo.items():
2699
      self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
2700
                              node_image[node], vg_name)
2701

    
2702
    feedback_fn("* Verifying instance status")
2703
    for instance in self.my_inst_names:
2704
      if verbose:
2705
        feedback_fn("* Verifying instance %s" % instance)
2706
      inst_config = self.my_inst_info[instance]
2707
      self._VerifyInstance(instance, inst_config, node_image,
2708
                           instdisk[instance])
2709
      inst_nodes_offline = []
2710

    
2711
      pnode = inst_config.primary_node
2712
      pnode_img = node_image[pnode]
2713
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2714
               self.ENODERPC, pnode, "instance %s, connection to"
2715
               " primary node failed", instance)
2716

    
2717
      _ErrorIf(inst_config.admin_up and pnode_img.offline,
2718
               self.EINSTANCEBADNODE, instance,
2719
               "instance is marked as running and lives on offline node %s",
2720
               inst_config.primary_node)
2721

    
2722
      # If the instance is non-redundant we cannot survive losing its primary
2723
      # node, so we are not N+1 compliant. On the other hand we have no disk
2724
      # templates with more than one secondary so that situation is not well
2725
      # supported either.
2726
      # FIXME: does not support file-backed instances
2727
      if not inst_config.secondary_nodes:
2728
        i_non_redundant.append(instance)
2729

    
2730
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2731
               instance, "instance has multiple secondary nodes: %s",
2732
               utils.CommaJoin(inst_config.secondary_nodes),
2733
               code=self.ETYPE_WARNING)
2734

    
2735
      if inst_config.disk_template in constants.DTS_INT_MIRROR:
2736
        pnode = inst_config.primary_node
2737
        instance_nodes = utils.NiceSort(inst_config.all_nodes)
2738
        instance_groups = {}
2739

    
2740
        for node in instance_nodes:
2741
          instance_groups.setdefault(self.all_node_info[node].group,
2742
                                     []).append(node)
2743

    
2744
        pretty_list = [
2745
          "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
2746
          # Sort so that we always list the primary node first.
2747
          for group, nodes in sorted(instance_groups.items(),
2748
                                     key=lambda (_, nodes): pnode in nodes,
2749
                                     reverse=True)]

        self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
                      instance, "instance has primary and secondary nodes in"
                      " different groups: %s", utils.CommaJoin(pretty_list),
                      code=self.ETYPE_WARNING)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        s_img = node_image[snode]
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
                 "instance %s, connection to secondary node failed", instance)

        if s_img.offline:
          inst_nodes_offline.append(snode)

      # warn that the instance lives on offline nodes
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
               "instance has offline secondary node(s) %s",
               utils.CommaJoin(inst_nodes_offline))
      # ... or ghost/non-vm_capable nodes
      for node in inst_config.all_nodes:
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
                 "instance lives on ghost node %s", node)
        _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
                 instance, "instance lives on non-vm_capable node %s", node)

    feedback_fn("* Verifying orphan volumes")
    reserved = utils.FieldSet(*cluster.reserved_lvs)

    # We will get spurious "unknown volume" warnings if any node of this group
    # is secondary for an instance whose primary is in another group. To avoid
    # them, we find these instances and add their volumes to node_vol_should.
    for inst in self.all_inst_info.values():
      for secondary in inst.secondary_nodes:
        if (secondary in self.my_node_info
            and inst.name not in self.my_inst_info):
          inst.MapLVsByNode(node_vol_should)
          break

    self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)

    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
      feedback_fn("* Verifying N+1 Memory redundancy")
      self._VerifyNPlusOneMemory(node_image, self.my_inst_info)

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)

    if n_drained:
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)

    return not self.bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, only for non-empty groups,
    # and are only interested in their results
    if not self.my_node_names:
      # empty node group
      pass
    elif phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      feedback_fn("* Hooks Results")
      assert hooks_results, "invalid result from hooks"

      for node_name in hooks_results:
        res = hooks_results[node_name]
        msg = res.fail_msg
        test = msg and not res.offline
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
                      "Communication failure in hooks execution: %s", msg)
        if res.offline or msg:
          # No need to investigate payload if node is offline or gave an error.
          # override manually lu_result here as _ErrorIf only
          # overrides self.bad
          lu_result = 1
          continue
        for script, hkr, output in res.payload:
          test = hkr == constants.HKR_FAIL
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
                        "Script %s failed, output:", script)
          if test:
            output = self._HOOKS_INDENT_RE.sub("      ", output)
            feedback_fn("%s" % output)
            lu_result = 0

    return lu_result


class LUClusterVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    result = res_nodes, res_instances, res_missing = {}, [], {}
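    # "result" is the same 3-tuple that res_nodes/res_instances/res_missing
    # are unpacked from, so filling those containers below also fills the
    # value returned to the caller.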

    nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
    instances = self.cfg.GetAllInstancesInfo().values()

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if not inst.admin_up:
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_lv_list(nodes, [])
    for node, node_res in node_lvs.items():
      if node_res.offline:
        continue
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
        res_nodes[node] = msg
        continue

      lvs = node_res.payload
      for lv_name, (_, _, lv_online) in lvs.items():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LUClusterRepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.instances:
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsize(node, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getsize call to node"
                        " %s, ignoring", node)
        continue
      if len(result.payload) != len(dskl):
        logging.warning("Invalid result from node %s: len(dskl)=%d,"
                        " result.payload=%s", node, len(dskl), result.payload)
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
        size = size >> 20
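        # blockdev_getsize reports sizes in bytes; the shift above converts
        # the value to MiB, the unit used for disk sizes in the configuration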
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, disk.size))
    return changed


class LUClusterRename(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = netutils.GetHostname(name=self.op.name,
                                    family=self.cfg.GetPrimaryIPFamily())

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed",
                                 errors.ECODE_INVAL)
    if new_ip != old_ip:
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network" %
                                   new_ip, errors.ECODE_NOTUNIQUE)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetOnlineNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
    finally:
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)

    return clustername


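# Note: for LUClusterSetParams an omitted parameter (None) means "leave
# unchanged", while an empty string for vg_name/drbd_helper is an explicit
# request to disable that feature (stored as None in the configuration).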
class LUClusterSetParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    """
    if self.op.uid_pool:
      uidpool.CheckUidPool(self.op.uid_pool)

    if self.op.add_uids:
      uidpool.CheckUidPool(self.op.add_uids)

    if self.op.remove_uids:
      uidpool.CheckUidPool(self.op.remove_uids)

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                   " instances exist", errors.ECODE_INVAL)

    if self.op.drbd_helper is not None and not self.op.drbd_helper:
      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
        raise errors.OpPrereqError("Cannot disable drbd helper while"
                                   " drbd-based instances exist",
                                   errors.ECODE_INVAL)

    node_list = self.glm.list_owned(locking.LEVEL_NODE)

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].fail_msg
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus), errors.ECODE_ENVIRON)

    if self.op.drbd_helper:
      # checks given drbd helper on all nodes
      helpers = self.rpc.call_drbd_helper(node_list)
      for node in node_list:
        ninfo = self.cfg.GetNodeInfo(node)
        if ninfo.offline:
          self.LogInfo("Not checking drbd helper on offline node %s", node)
          continue
        msg = helpers[node].fail_msg
        if msg:
          raise errors.OpPrereqError("Error checking drbd helper on node"
                                     " '%s': %s" % (node, msg),
                                     errors.ECODE_ENVIRON)
        node_helper = helpers[node].payload
        if node_helper != self.op.drbd_helper:
          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
                                     (node, node_helper), errors.ECODE_ENVIRON)

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate params changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)

      # TODO: we need a more general way to handle resetting
      # cluster-level parameters to default values
      if self.new_ndparams["oob_program"] == "":
        self.new_ndparams["oob_program"] = \
            constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
      nic_errors = []

      # check all instances for consistency
      for instance in self.cfg.GetAllInstancesInfo().values():
        for nic_idx, nic in enumerate(instance.nics):
          params_copy = copy.deepcopy(nic.nicparams)
          params_filled = objects.FillDict(self.new_nicparams, params_copy)

          # check parameter syntax
          try:
            objects.NIC.CheckParameterSyntax(params_filled)
          except errors.ConfigurationError, err:
            nic_errors.append("Instance %s, nic/%d: %s" %
                              (instance.name, nic_idx, err))

          # if we're moving instances to routed, check that they have an ip
          target_mode = params_filled[constants.NIC_MODE]
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
                              " address" % (instance.name, nic_idx))
      if nic_errors:
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
                                   "\n".join(nic_errors))

    # hypervisor list/parameters
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    # os hypervisor parameters
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
    if self.op.os_hvp:
      for os_name, hvs in self.op.os_hvp.items():
        if os_name not in self.new_os_hvp:
          self.new_os_hvp[os_name] = hvs
        else:
          for hv_name, hv_dict in hvs.items():
            if hv_name not in self.new_os_hvp[os_name]:
              self.new_os_hvp[os_name][hv_name] = hv_dict
            else:
              self.new_os_hvp[os_name][hv_name].update(hv_dict)

    # os parameters
    self.new_osp = objects.FillDict(cluster.osparams, {})
    if self.op.osparams:
      for os_name, osp in self.op.osparams.items():
        if os_name not in self.new_osp:
          self.new_osp[os_name] = {}

        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
                                                  use_none=True)

        if not self.new_osp[os_name]:
          # we removed all parameters
          del self.new_osp[os_name]
        else:
          # check the parameter validity (remote check)
          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
                         os_name, self.new_osp[os_name])

    # changes to the hypervisor list
    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      for hv in self.hv_list:
        # if the hypervisor doesn't already exist in the cluster
        # hvparams, we initialize it to empty, and then (in both
        # cases) we make sure to fill the defaults, as we might not
        # have a complete defaults list if the hypervisor wasn't
        # enabled before
        if hv not in new_hvp:
          new_hvp[hv] = {}
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

    if self.op.os_hvp:
      # no need to check any newly-enabled hypervisors, since the
      # defaults have already been checked in the above code-block
      for os_name, os_hvp in self.new_os_hvp.items():
        for hv_name, hv_params in os_hvp.items():
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          # we need to fill in the new os_hvp on top of the actual hv_p
          cluster_defaults = self.new_hvparams.get(hv_name, {})
          new_osp = objects.FillDict(cluster_defaults, hv_params)
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(new_osp)
          _CheckHVParams(self, node_list, hv_name, new_osp)

    if self.op.default_iallocator:
      alloc_script = utils.FindFile(self.op.default_iallocator,
                                    constants.IALLOCATOR_SEARCH_PATH,
                                    os.path.isfile)
      if alloc_script is None:
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                   " specified" % self.op.default_iallocator,
                                   errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.drbd_helper is not None:
      new_helper = self.op.drbd_helper
      if not new_helper:
        new_helper = None
      if new_helper != self.cfg.GetDRBDHelper():
        self.cfg.SetDRBDHelper(new_helper)
      else:
        feedback_fn("Cluster DRBD helper already in desired state,"
                    " not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.os_hvp:
      self.cluster.os_hvp = self.new_os_hvp
    if self.op.enabled_hypervisors is not None:
      self.cluster.hvparams = self.new_hvparams
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
    if self.op.osparams:
      self.cluster.osparams = self.new_osp
    if self.op.ndparams:
      self.cluster.ndparams = self.new_ndparams

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self, [])

    if self.op.maintain_node_health is not None:
      self.cluster.maintain_node_health = self.op.maintain_node_health

    if self.op.prealloc_wipe_disks is not None:
      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks

    if self.op.add_uids is not None:
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)

    if self.op.remove_uids is not None:
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)

    if self.op.uid_pool is not None:
      self.cluster.uid_pool = self.op.uid_pool

    if self.op.default_iallocator is not None:
      self.cluster.default_iallocator = self.op.default_iallocator

    if self.op.reserved_lvs is not None:
      self.cluster.reserved_lvs = self.op.reserved_lvs

    def helper_os(aname, mods, desc):
      desc += " OS list"
      lst = getattr(self.cluster, aname)
      for key, val in mods:
        if key == constants.DDM_ADD:
          if val in lst:
            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
          else:
            lst.append(val)
        elif key == constants.DDM_REMOVE:
          if val in lst:
            lst.remove(val)
          else:
            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
        else:
          raise errors.ProgrammerError("Invalid modification '%s'" % key)

    if self.op.hidden_os:
      helper_os("hidden_os", self.op.hidden_os, "hidden")

    if self.op.blacklisted_os:
      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")

    if self.op.master_netdev:
      master = self.cfg.GetMasterNode()
      feedback_fn("Shutting down master ip on the current netdev (%s)" %
                  self.cluster.master_netdev)
      result = self.rpc.call_node_stop_master(master, False)
      result.Raise("Could not disable the master ip")
      feedback_fn("Changing master_netdev from %s to %s" %
                  (self.cluster.master_netdev, self.op.master_netdev))
      self.cluster.master_netdev = self.op.master_netdev

    self.cfg.Update(self.cluster, feedback_fn)

    if self.op.master_netdev:
      feedback_fn("Starting the master ip on the new master netdev (%s)" %
                  self.op.master_netdev)
      result = self.rpc.call_node_start_master(master, False, False)
      if result.fail_msg:
        self.LogWarning("Could not re-enable the master ip on"
                        " the master, please restart manually: %s",
                        result.fail_msg)


def _UploadHelper(lu, nodes, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(nodes, fname)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, to_node, msg))
        lu.proc.LogWarning(msg)


def _ComputeAncillaryFiles(cluster, redist):
  """Compute files external to Ganeti which need to be consistent.

  @type redist: boolean
  @param redist: Whether to include files which need to be redistributed

  """
  # Compute files for all nodes
  files_all = set([
    constants.SSH_KNOWN_HOSTS_FILE,
    constants.CONFD_HMAC_KEY,
    constants.CLUSTER_DOMAIN_SECRET_FILE,
    ])

  if not redist:
    files_all.update(constants.ALL_CERT_FILES)
    files_all.update(ssconf.SimpleStore().GetFileList())

  if cluster.modify_etc_hosts:
    files_all.add(constants.ETC_HOSTS)

  # Files which must either exist on all nodes or on none
  files_all_opt = set([
    constants.RAPI_USERS_FILE,
    ])

  # Files which should only be on master candidates
  files_mc = set()
  if not redist:
    files_mc.add(constants.CLUSTER_CONF_FILE)

  # Files which should only be on VM-capable nodes
  files_vm = set(filename
    for hv_name in cluster.enabled_hypervisors
    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles())

  # Filenames must be unique
  assert (len(files_all | files_all_opt | files_mc | files_vm) ==
          sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
         "Found file listed in more than one file list"

  return (files_all, files_all_opt, files_mc, files_vm)


def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to
  @type additional_vm: boolean
  @param additional_vm: whether the additional nodes are vm-capable or not

  """
  # Gather target nodes
  cluster = lu.cfg.GetClusterInfo()
  master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())

  online_nodes = lu.cfg.GetOnlineNodeList()
  vm_nodes = lu.cfg.GetVmCapableNodeList()

  if additional_nodes is not None:
    online_nodes.extend(additional_nodes)
    if additional_vm:
      vm_nodes.extend(additional_nodes)

  # Never distribute to master node
  for nodelist in [online_nodes, vm_nodes]:
    if master_info.name in nodelist:
      nodelist.remove(master_info.name)

  # Gather file lists
  (files_all, files_all_opt, files_mc, files_vm) = \
    _ComputeAncillaryFiles(cluster, True)

  # Never re-distribute configuration file from here
  assert not (constants.CLUSTER_CONF_FILE in files_all or
              constants.CLUSTER_CONF_FILE in files_vm)
  assert not files_mc, "Master candidates not handled in this function"

  filemap = [
    (online_nodes, files_all),
    (online_nodes, files_all_opt),
    (vm_nodes, files_vm),
    ]

  # Upload the files
  for (node_list, files) in filemap:
    for fname in files:
      _UploadHelper(lu, node_list, fname)


class LUClusterRedistConf(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    _RedistributeAncillaryFiles(self)


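# _WaitForSync polls the primary node's mirror status, sleeping up to 60
# seconds between polls (bounded by the estimated remaining sync time); RPC
# failures are retried a few times, and a state that looks finished but
# degraded is re-checked with short 1-second retries before the final verdict.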
def _WaitForSync(lu, instance, disks=None, oneshot=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks or disks is not None and not disks:
    return True

  disks = _ExpandCheckDisks(instance, disks)

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in disks:
    lu.cfg.SetDiskID(dev, node)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = ("%s remaining (estimated)" %
                      utils.FormatSeconds(mstat.estimated_time))
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (disks[i].iv_name, mstat.sync_percent, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


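# Out-of-band commands are always executed from the master node, which runs
# the per-node OOB helper program (see _SupportsOob); each target node gets a
# query-style (status, payload) entry in the result.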
class LUOobCommand(NoHooksLU):
  """Logical unit for OOB handling.

  """
  REQ_BGL = False
  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)

  def ExpandNames(self):
    """Gather locks we need.

    """
    if self.op.node_names:
      self.op.node_names = _GetWantedNodes(self, self.op.node_names)
      lock_names = self.op.node_names
    else:
      lock_names = locking.ALL_SET

    self.needed_locks = {
      locking.LEVEL_NODE: lock_names,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - OOB is supported

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.nodes = []
    self.master_node = self.cfg.GetMasterNode()

    assert self.op.power_delay >= 0.0

    if self.op.node_names:
      if (self.op.command in self._SKIP_MASTER and
          self.master_node in self.op.node_names):
        master_node_obj = self.cfg.GetNodeInfo(self.master_node)
        master_oob_handler = _SupportsOob(self.cfg, master_node_obj)

        if master_oob_handler:
          additional_text = ("run '%s %s %s' if you want to operate on the"
                             " master regardless") % (master_oob_handler,
                                                      self.op.command,
                                                      self.master_node)
        else:
          additional_text = "it does not support out-of-band operations"

        raise errors.OpPrereqError(("Operating on the master node %s is not"
                                    " allowed for %s; %s") %
                                   (self.master_node, self.op.command,
                                    additional_text), errors.ECODE_INVAL)
    else:
      self.op.node_names = self.cfg.GetNodeList()
      if self.op.command in self._SKIP_MASTER:
        self.op.node_names.remove(self.master_node)

    if self.op.command in self._SKIP_MASTER:
      assert self.master_node not in self.op.node_names

    for node_name in self.op.node_names:
      node = self.cfg.GetNodeInfo(node_name)

      if node is None:
        raise errors.OpPrereqError("Node %s not found" % node_name,
                                   errors.ECODE_NOENT)
      else:
        self.nodes.append(node)

      if (not self.op.ignore_status and
          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                    " not marked offline") % node_name,
                                   errors.ECODE_STATE)

  def Exec(self, feedback_fn):
    """Execute OOB and return result if we expect any.

    """
    master_node = self.master_node
    ret = []

    for idx, node in enumerate(utils.NiceSort(self.nodes,
                                              key=lambda node: node.name)):
      node_entry = [(constants.RS_NORMAL, node.name)]
      ret.append(node_entry)

      oob_program = _SupportsOob(self.cfg, node)

      if not oob_program:
        node_entry.append((constants.RS_UNAVAIL, None))
        continue

      logging.info("Executing out-of-band command '%s' using '%s' on %s",
                   self.op.command, oob_program, node.name)
      result = self.rpc.call_run_oob(master_node, oob_program,
                                     self.op.command, node.name,
                                     self.op.timeout)

      if result.fail_msg:
        self.LogWarning("Out-of-band RPC failed on node '%s': %s",
                        node.name, result.fail_msg)
        node_entry.append((constants.RS_NODATA, None))
      else:
        try:
          self._CheckPayload(result)
        except errors.OpExecError, err:
          self.LogWarning("Payload returned by node '%s' is not valid: %s",
                          node.name, err)
          node_entry.append((constants.RS_NODATA, None))
        else:
          if self.op.command == constants.OOB_HEALTH:
            # For health we should log important events
            for item, status in result.payload:
              if status in [constants.OOB_STATUS_WARNING,
                            constants.OOB_STATUS_CRITICAL]:
                self.LogWarning("Item '%s' on node '%s' has status '%s'",
                                item, node.name, status)

          if self.op.command == constants.OOB_POWER_ON:
            node.powered = True
          elif self.op.command == constants.OOB_POWER_OFF:
            node.powered = False
          elif self.op.command == constants.OOB_POWER_STATUS:
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
            if powered != node.powered:
              logging.warning(("Recorded power state (%s) of node '%s' does not"
                               " match actual power state (%s)"), node.powered,
                              node.name, powered)

          # For configuration changing commands we should update the node
          if self.op.command in (constants.OOB_POWER_ON,
                                 constants.OOB_POWER_OFF):
            self.cfg.Update(node, feedback_fn)

          node_entry.append((constants.RS_NORMAL, result.payload))

          if (self.op.command == constants.OOB_POWER_ON and
              idx < len(self.nodes) - 1):
            time.sleep(self.op.power_delay)

    return ret

  def _CheckPayload(self, result):
    """Checks if the payload is valid.

    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid

    """
    errs = []
    if self.op.command == constants.OOB_HEALTH:
      if not isinstance(result.payload, list):
        errs.append("command 'health' is expected to return a list but got %s" %
                    type(result.payload))
      else:
        for item, status in result.payload:
          if status not in constants.OOB_STATUSES:
            errs.append("health item '%s' has invalid status '%s'" %
                        (item, status))

    if self.op.command == constants.OOB_POWER_STATUS:
      if not isinstance(result.payload, dict):
        errs.append("power-status is expected to return a dict but got %s" %
                    type(result.payload))

    if self.op.command in [
        constants.OOB_POWER_ON,
        constants.OOB_POWER_OFF,
        constants.OOB_POWER_CYCLE,
        ]:
      if result.payload is not None:
        errs.append("%s is expected to not return payload but got '%s'" %
                    (self.op.command, result.payload))

    if errs:
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                               utils.CommaJoin(errs))

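# Query implementations derive from _QueryBase: ExpandNames/DeclareLocks set
# up locking for the calling LU, while _GetQueryData collects the raw data
# that the FIELDS definitions are evaluated against.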
class _OsQuery(_QueryBase):
  FIELDS = query.OS_FIELDS

  def ExpandNames(self, lu):
    # Lock all nodes in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    lu.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    # The following variables interact with _QueryBase._GetNames
    if self.names:
      self.wanted = self.names
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self.use_locking

  def DeclareLocks(self, lu, level):
    pass

  @staticmethod
  def _DiagnoseByOS(rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another
        map, with nodes as keys and tuples of (path, status, diagnose,
        variants, parameters, api_versions) as values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "", [], [])]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for (name, path, status, diagnose, variants,
           params, api_versions) in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        # convert params from [name, help] to (name, help)
        params = [tuple(v) for v in params]
        all_os[name][node_name].append((path, status, diagnose,
                                        variants, params, api_versions))
    return all_os

  def _GetQueryData(self, lu):
    """Computes the list of nodes and their attributes.

    """
    # Locking is not used
    assert not (compat.any(lu.glm.is_owned(level)
                           for level in locking.LEVELS
                           if level != locking.LEVEL_CLUSTER) or
                self.do_locking or self.use_locking)

    valid_nodes = [node.name
                   for node in lu.cfg.GetAllNodesInfo().values()
                   if not node.offline and node.vm_capable]
    pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
    cluster = lu.cfg.GetClusterInfo()

    data = {}

    for (os_name, os_data) in pol.items():
      info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
                          hidden=(os_name in cluster.hidden_os),
                          blacklisted=(os_name in cluster.blacklisted_os))

      variants = set()
      parameters = set()
      api_versions = set()

      for idx, osl in enumerate(os_data.values()):
        info.valid = bool(info.valid and osl and osl[0][1])
        if not info.valid:
          break

        (node_variants, node_params, node_api) = osl[0][3:6]
        if idx == 0:
          # First entry
          variants.update(node_variants)
          parameters.update(node_params)
          api_versions.update(node_api)
        else:
          # Filter out inconsistent values
          variants.intersection_update(node_variants)
          parameters.intersection_update(node_params)
          api_versions.intersection_update(node_api)

      info.variants = list(variants)
      info.parameters = list(parameters)
      info.api_versions = list(api_versions)

      data[os_name] = info

    # Prepare data in requested order
    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
            if name in data]


class LUOsDiagnose(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  REQ_BGL = False

  @staticmethod
  def _BuildFilter(fields, names):
    """Builds a filter for querying OSes.

    """
    name_filter = qlang.MakeSimpleFilter("name", names)

    # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
    # respective field is not requested
    status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
                     for fname in ["hidden", "blacklisted"]
                     if fname not in fields]
    if "valid" not in fields:
      status_filter.append([qlang.OP_TRUE, "valid"])

    if status_filter:
      status_filter.insert(0, qlang.OP_AND)
    else:
      status_filter = None

    if name_filter and status_filter:
      return [qlang.OP_AND, name_filter, status_filter]
    elif name_filter:
      return name_filter
    else:
      return status_filter

  def CheckArguments(self):
    self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
                       self.op.output_fields, False)

  def ExpandNames(self):
    self.oq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.oq.OldStyleQuery(self)


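# Node removal promotes replacement master candidates, drops the node from
# the configuration, runs the post hooks on it and only then tells the node
# daemon to leave the cluster (also cleaning it out of /etc/hosts).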
class LUNodeRemove(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    return {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    all_nodes = self.cfg.GetNodeList()
    try:
      all_nodes.remove(self.op.node_name)
    except ValueError:
      logging.warning("Node '%s', which is about to be removed, was not found"
                      " in the list of all nodes", self.op.node_name)
    return (all_nodes, all_nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    node = self.cfg.GetNodeInfo(self.op.node_name)
    assert node is not None

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node, failover to another"
                                 " node is required", errors.ECODE_INVAL)

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first" % instance_name,
                                   errors.ECODE_INVAL)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self, exceptions=[node.name])
    self.context.RemoveNode(node.name)

    # Run post hooks on the node before it's removed
    _RunPostHook(self, node.name)

    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Errors encountered on the remote node while leaving"
                      " the cluster: %s", msg)

    # Remove node from our /etc/hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_REMOVE,
                                              node.name, None)
      result.Raise("Can't update hosts file with new host data")
      _RedistributeAncillaryFiles(self)


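# _NodeQuery only acquires node locks when live data (NQ_LIVE) is requested
# together with use_locking; purely static queries run without locks.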
class _NodeQuery(_QueryBase):
4147
  FIELDS = query.NODE_FIELDS
4148

    
4149
  def ExpandNames(self, lu):
4150
    lu.needed_locks = {}
4151
    lu.share_locks[locking.LEVEL_NODE] = 1
4152

    
4153
    if self.names:
4154
      self.wanted = _GetWantedNodes(lu, self.names)
4155
    else:
4156
      self.wanted = locking.ALL_SET
4157

    
4158
    self.do_locking = (self.use_locking and
4159
                       query.NQ_LIVE in self.requested_data)
4160

    
4161
    if self.do_locking:
4162
      # if we don't request only static fields, we need to lock the nodes
4163
      lu.needed_locks[locking.LEVEL_NODE] = self.wanted
4164

    
4165
  def DeclareLocks(self, lu, level):
4166
    pass
4167

    
4168
  def _GetQueryData(self, lu):
4169
    """Computes the list of nodes and their attributes.
4170

4171
    """
4172
    all_info = lu.cfg.GetAllNodesInfo()
4173

    
4174
    nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
4175

    
4176
    # Gather data as requested
4177
    if query.NQ_LIVE in self.requested_data:
4178
      # filter out non-vm_capable nodes
4179
      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
4180

    
4181
      node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
4182
                                        lu.cfg.GetHypervisorType())
4183
      live_data = dict((name, nresult.payload)
4184
                       for (name, nresult) in node_data.items()
4185
                       if not nresult.fail_msg and nresult.payload)
4186
    else:
4187
      live_data = None
4188

    
4189
    if query.NQ_INST in self.requested_data:
4190
      node_to_primary = dict([(name, set()) for name in nodenames])
4191
      node_to_secondary = dict([(name, set()) for name in nodenames])
4192

    
4193
      inst_data = lu.cfg.GetAllInstancesInfo()
4194

    
4195
      for inst in inst_data.values():
4196
        if inst.primary_node in node_to_primary:
4197
          node_to_primary[inst.primary_node].add(inst.name)
4198
        for secnode in inst.secondary_nodes:
4199
          if secnode in node_to_secondary:
4200
            node_to_secondary[secnode].add(inst.name)
4201
    else:
4202
      node_to_primary = None
4203
      node_to_secondary = None
4204

    
4205
    if query.NQ_OOB in self.requested_data:
4206
      oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
4207
                         for name, node in all_info.iteritems())
4208
    else:
4209
      oob_support = None
4210

    
4211
    if query.NQ_GROUP in self.requested_data:
4212
      groups = lu.cfg.GetAllNodeGroupsInfo()
4213
    else:
4214
      groups = {}
4215

    
4216
    return query.NodeQueryData([all_info[name] for name in nodenames],
4217
                               live_data, lu.cfg.GetMasterNode(),
4218
                               node_to_primary, node_to_secondary, groups,
4219
                               oob_support, lu.cfg.GetClusterInfo())
4220

    
4221

    
4222
class LUNodeQuery(NoHooksLU):
  """Logical unit for querying nodes.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
                         self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.nq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.nq.OldStyleQuery(self)


class LUNodeQueryvols(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Computes the list of volumes on the node(s) and their attributes.

    """
    nodenames = self.glm.list_owned(locking.LEVEL_NODE)
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = self.cfg.GetAllInstancesInfo()
    vol2inst = _MapInstanceDisksToNodes(ilist.values())

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = sorted(nresult.payload,
                         key=operator.itemgetter("dev"))

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol["dev"]
          elif field == "vg":
            val = vol["vg"]
          elif field == "name":
            val = vol["name"]
          elif field == "size":
            val = int(float(vol["size"]))
          elif field == "instance":
            val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


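# Illustrative sketch (assumed values, not from the source): with
# output_fields = ["node", "name", "size"], each volume reported by
# call_node_volumes becomes one row of strings, e.g.
#   ["node1.example.com", "lv0", "10240"]
# so the "output" returned above is a list of such per-volume rows.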
class LUNodeQueryStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
  REQ_BGL = False

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Computes the list of storage units on the node(s) and their attributes.

    """
    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)

    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result


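# Illustrative note (hypothetical values): call_storage_list returns each row
# in the order of the "fields" list sent to the node, so field_idx maps a
# field name to its column.  E.g. with fields = ["name", "size", "free"] a
# payload row ["xenvg", 102400, 51200] is keyed as rows["xenvg"] and its size
# is looked up as row[field_idx["size"]].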
class _InstanceQuery(_QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks[locking.LEVEL_INSTANCE] = 1
    lu.share_locks[locking.LEVEL_NODE] = 1

    if self.names:
      self.wanted = _GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, lu, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      lu._LockInstancesNodes() # pylint: disable-msg=W0212

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    cluster = lu.cfg.GetClusterInfo()
    all_info = lu.cfg.GetAllInstancesInfo()

    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)

    instance_list = [all_info[name] for name in instance_names]
    nodes = frozenset(itertools.chain(*(inst.all_nodes
                                        for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_nodes = []
    offline_nodes = []
    wrongnode_inst = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if inst in all_info:
              if all_info[inst].primary_node == name:
                live_data.update(result.payload)
              else:
                wrongnode_inst.add(inst)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst, name)
        # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      disk_usage = dict((inst.name,
                         _ComputeDiskSize(inst.disk_template,
                                          [{constants.IDISK_SIZE: disk.size}
                                           for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_nodes, bad_nodes,
                                   live_data, wrongnode_inst, consinfo)


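# Illustrative note (assumed data): call_all_instances_info returns, per node,
# a dict of the instances actually running there.  If instance "inst1" is
# configured with primary node "node1" but shows up in the payload of "node2",
# it lands in wrongnode_inst instead of live_data, which is how the query
# layer can flag instances that run on the wrong node.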
class LUQuery(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    qcls = _GetQueryImplementation(self.op.what)

    self.impl = qcls(self.op.filter, self.op.fields, False)

  def ExpandNames(self):
    self.impl.ExpandNames(self)

  def DeclareLocks(self, level):
    self.impl.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.impl.NewStyleQuery(self)


class LUQueryFields(NoHooksLU):
  """Query the available fields for resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.qcls = _GetQueryImplementation(self.op.what)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    return query.QueryFields(self.qcls.FIELDS, self.op.fields)


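# Illustrative note (hedged): _GetQueryImplementation maps the opcode's "what"
# (the requested resource kind, e.g. nodes or instances) to the matching
# _QueryBase subclass, so LUQuery and LUQueryFields stay generic:
#   impl = _GetQueryImplementation(self.op.what)      # e.g. _NodeQuery
#   impl(filter_, fields, use_locking).NewStyleQuery(lu)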
class LUNodeModifyStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def Exec(self, feedback_fn):
    """Modifies a storage unit on the given node.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


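# Illustrative sketch (assumed field names): op.changes is a dict of storage
# fields to update, and only the keys listed in
# constants.MODIFIABLE_STORAGE_FIELDS for that storage type are accepted, e.g.
#   changes = {constants.SF_ALLOCATABLE: False}
# for an LVM physical volume; anything else is rejected in CheckArguments.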
class LUNodeAdd(LogicalUnit):
  """Logical unit for adding a node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _NFLAGS = ["master_capable", "vm_capable"]

  def CheckArguments(self):
    self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
    # validate/normalize the node name
    self.hostname = netutils.GetHostname(name=self.op.node_name,
                                         family=self.primary_ip_family)
    self.op.node_name = self.hostname.name

    if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
      raise errors.OpPrereqError("Cannot readd the master node",
                                 errors.ECODE_STATE)

    if self.op.readd and self.op.group:
      raise errors.OpPrereqError("Cannot pass a node group when a node is"
                                 " being readded", errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    return {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    # Exclude added node
    pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
    post_nodes = pre_nodes + [self.op.node_name, ]

    return (pre_nodes, post_nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    cfg = self.cfg
    hostname = self.hostname
    node = hostname.name
    primary_ip = self.op.primary_ip = hostname.ip
    if self.op.secondary_ip is None:
      if self.primary_ip_family == netutils.IP6Address.family:
        raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
                                   " IPv4 address must be given as secondary",
                                   errors.ECODE_INVAL)
      self.op.secondary_ip = primary_ip

    secondary_ip = self.op.secondary_ip
    if not netutils.IP4Address.IsValid(secondary_ip):
      raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                 " address" % secondary_ip, errors.ECODE_INVAL)

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node, errors.ECODE_EXISTS)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
                                 errors.ECODE_NOENT)

    self.changed_primary_ip = False

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if existing_node.secondary_ip != secondary_ip:
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before",
                                     errors.ECODE_INVAL)
        if existing_node.primary_ip != primary_ip:
          self.changed_primary_ip = True

        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name,
                                   errors.ECODE_NOTUNIQUE)

    # After this 'if' block, None is no longer a valid value for the
    # _capable op attributes
    if self.op.readd:
      old_node = self.cfg.GetNodeInfo(node)
      assert old_node is not None, "Can't retrieve locked node %s" % node
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, getattr(old_node, attr))
    else:
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, True)

    if self.op.readd and not self.op.vm_capable:
      pri, sec = cfg.GetNodeInstances(node)
      if pri or sec:
        raise errors.OpPrereqError("Node %s being re-added with vm_capable"
                                   " flag set to false, but it already holds"
                                   " instances" % node,
                                   errors.ECODE_STATE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no secondary ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a secondary ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to node daemon port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    if self.op.master_capable:
      self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
    else:
      self.master_candidate = False

    if self.op.readd:
      self.new_node = old_node
    else:
      node_group = cfg.LookupNodeGroup(self.op.group)
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False,
                                   group=node_group)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # We are adding a new node, so we assume it's powered
    new_node.powered = True

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate
      if self.changed_primary_ip:
        new_node.primary_ip = self.op.primary_ip

    # copy the master/vm_capable flags
    for attr in self._NFLAGS:
      setattr(new_node, attr, getattr(self.op, attr))

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    if self.op.ndparams:
      new_node.ndparams = self.op.ndparams
    else:
      new_node.ndparams = {}

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_ADD,
                                              self.hostname.name,
                                              self.hostname.ip)
      result.Raise("Can't update hosts file with new host data")

    if new_node.secondary_ip != new_node.primary_ip:
      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
                               False)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node],
                                  additional_vm=self.op.vm_capable)
      self.context.AddNode(new_node, self.proc.GetECId())


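# Illustrative note (hedged): the verification step in LUNodeAdd.Exec asks the
# master node to run a node-verify restricted to the new node, so a successful
# run leaves the NV_NODELIST payload empty; a failure would look roughly like
#   {"node3.example.com": "ssh problem: ..."}
# in which case each entry is reported through feedback_fn before aborting.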
class LUNodeSetParams(LogicalUnit):
  """Modifies the parameters of a node.

  @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
      to the node role (as _ROLE_*)
  @cvar _R2F: a dictionary from node role to tuples of flags
  @cvar _FLAGS: a list of attribute names corresponding to the flags

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False
  (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
  _F2R = {
    (True, False, False): _ROLE_CANDIDATE,
    (False, True, False): _ROLE_DRAINED,
    (False, False, True): _ROLE_OFFLINE,
    (False, False, False): _ROLE_REGULAR,
    }
  _R2F = dict((v, k) for k, v in _F2R.items())
  _FLAGS = ["master_candidate", "drained", "offline"]

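  # Illustrative note (not in the original): _F2R maps the flag tuple
  # (master_candidate, drained, offline) to a single role and _R2F inverts it.
  # For example a drained node carries the flags (False, True, False), so
  #   _F2R[(False, True, False)] == _ROLE_DRAINED
  # and switching it back to a regular node writes _R2F[_ROLE_REGULAR], i.e.
  # (False, False, False), onto the node object in Exec below.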
  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
                self.op.master_capable, self.op.vm_capable,
                self.op.secondary_ip, self.op.ndparams]
    if all_mods.count(None) == len(all_mods):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time",
                                 errors.ECODE_INVAL)

    # Boolean value that tells us whether we might be demoting from MC
    self.might_demote = (self.op.master_candidate == False or
                         self.op.offline == True or
                         self.op.drained == True or
                         self.op.master_capable == False)

    if self.op.secondary_ip:
      if not netutils.IP4Address.IsValid(self.op.secondary_ip):
        raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                   " address" % self.op.secondary_ip,
                                   errors.ECODE_INVAL)

    self.lock_all = self.op.auto_promote and self.might_demote
    self.lock_instances = self.op.secondary_ip is not None

  def ExpandNames(self):
    if self.lock_all:
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    else:
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

    if self.lock_instances:
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

  def DeclareLocks(self, level):
    # If we have locked all instances, before waiting to lock nodes, release
    # all the ones living on nodes unrelated to the current operation.
    if level == locking.LEVEL_NODE and self.lock_instances:
      self.affected_instances = []
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
        instances_keep = []

        # Build list of instances to release
        for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
          instance = self.context.cfg.GetInstanceInfo(instance_name)
          if (instance.disk_template in constants.DTS_INT_MIRROR and
              self.op.node_name in instance.all_nodes):
            instances_keep.append(instance_name)
            self.affected_instances.append(instance)

        _ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)

        assert (set(self.glm.list_owned(locking.LEVEL_INSTANCE)) ==
                set(instances_keep))

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    return {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.node_name]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested flag and parameter changes against the
    current state of the node.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via master-failover",
                                   errors.ECODE_INVAL)

    if self.op.master_candidate and not node.master_capable:
      raise errors.OpPrereqError("Node %s is not master capable, cannot make"
                                 " it a master candidate" % node.name,
                                 errors.ECODE_STATE)

    if self.op.vm_capable == False:
      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
      if ipri or isec:
        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
                                   " the vm_capable flag" % node.name,
                                   errors.ECODE_STATE)

    if node.master_candidate and self.might_demote and not self.lock_all:
      assert not self.op.auto_promote, "auto_promote set but lock_all not"
      # check if after removing the current node, we're missing master
      # candidates
      (mc_remaining, mc_should, _) = \
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
      if mc_remaining < mc_should:
        raise errors.OpPrereqError("Not enough master candidates, please"
                                   " pass auto promote option to allow"
                                   " promotion", errors.ECODE_STATE)

    self.old_flags = old_flags = (node.master_candidate,
                                  node.drained, node.offline)
    assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
    self.old_role = old_role = self._F2R[old_flags]

    # Check for ineffective changes
    for attr in self._FLAGS:
      if (getattr(self.op, attr) == False and getattr(node, attr) == False):
        self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
        setattr(self.op, attr, None)

    # Past this point, any flag change to False means a transition
    # away from the respective state, as only real changes are kept

    # TODO: We might query the real power state if it supports OOB
    if _SupportsOob(self.cfg, node):
      if self.op.offline is False and not (node.powered or
                                           self.op.powered == True):
        raise errors.OpPrereqError(("Node %s needs to be turned on before its"
                                    " offline status can be reset") %
                                   self.op.node_name)
    elif self.op.powered is not None:
      raise errors.OpPrereqError(("Unable to change powered state for node %s"
                                  " as it does not support out-of-band"
                                  " handling") % self.op.node_name)

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (self.op.drained == False or self.op.offline == False or
        (self.op.master_capable and not node.master_capable)):
      if _DecideSelfPromotion(self):
        self.op.master_candidate = True
        self.LogInfo("Auto-promoting node to master candidate")

    # If we're no longer master capable, we'll demote ourselves from MC
    if self.op.master_capable == False and node.master_candidate:
      self.LogInfo("Demoting from master candidate")
      self.op.master_candidate = False

    # Compute new role
    assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
    if self.op.master_candidate:
      new_role = self._ROLE_CANDIDATE
    elif self.op.drained:
      new_role = self._ROLE_DRAINED
    elif self.op.offline:
      new_role = self._ROLE_OFFLINE
    elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
      # False is still in new flags, which means we're un-setting (the
      # only) True flag
      new_role = self._ROLE_REGULAR
    else: # no new flags, nothing, keep old role
      new_role = old_role

    self.new_role = new_role

    if old_role == self._ROLE_OFFLINE and new_role != old_role:
      # Trying to transition out of offline status
      result = self.rpc.call_version([node.name])[node.name]
      if result.fail_msg:
        raise errors.OpPrereqError("Node %s is being de-offlined but fails"
                                   " to report its version: %s" %
                                   (node.name, result.fail_msg),
                                   errors.ECODE_STATE)
      else:
        self.LogWarning("Transitioning node from offline to online state"
                        " without using re-add. Please make sure the node"
                        " is healthy!")

    if self.op.secondary_ip:
      # Ok even without locking, because this can't be changed by any LU
      master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
      master_singlehomed = master.secondary_ip == master.primary_ip
      if master_singlehomed and self.op.secondary_ip:
        raise errors.OpPrereqError("Cannot change the secondary ip on a single"
                                   " homed cluster", errors.ECODE_INVAL)

      if node.offline:
        if self.affected_instances:
          raise errors.OpPrereqError("Cannot change secondary ip: offline"
                                     " node has instances (%s) configured"
                                     " to use it" % self.affected_instances)
      else:
        # On online nodes, check that no instances are running, and that
        # the node has the new ip and we can reach it.
        for instance in self.affected_instances:
          _CheckInstanceDown(self, instance, "cannot change secondary ip")

        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
        if master.name != node.name:
          # check reachability from master secondary ip to new secondary ip
          if not netutils.TcpPing(self.op.secondary_ip,
                                  constants.DEFAULT_NODED_PORT,
                                  source=master.secondary_ip):
            raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                       " based ping to node daemon port",
                                       errors.ECODE_ENVIRON)

    if self.op.ndparams:
      new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node
    old_role = self.old_role
    new_role = self.new_role

    result = []

    if self.op.ndparams:
      node.ndparams = self.new_ndparams

    if self.op.powered is not None:
      node.powered = self.op.powered

    for attr in ["master_capable", "vm_capable"]:
      val = getattr(self.op, attr)
      if val is not None:
        setattr(node, attr, val)
        result.append((attr, str(val)))

    if new_role != old_role:
      # Tell the node to demote itself, if no longer MC and not offline
      if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
        msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s", msg)

      new_flags = self._R2F[new_role]
      for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
        if of != nf:
          result.append((desc, str(nf)))
      (node.master_candidate, node.drained, node.offline) = new_flags

      # we locked all nodes, we adjust the CP before updating this node
      if self.lock_all:
        _AdjustCandidatePool(self, [node.name])

    if self.op.secondary_ip:
      node.secondary_ip = self.op.secondary_ip
      result.append(("secondary_ip", self.op.secondary_ip))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)

    # this will trigger job queue propagation or cleanup if the mc
    # flag changed
    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
      self.context.ReaddNode(node)

    return result


class LUNodePowercycle(NoHooksLU):
  """Powercycles a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload


class LUClusterQuery(NoHooksLU):
  """Query cluster configuration.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}

    # Filter just for enabled hypervisors
    for os_name, hv_dict in cluster.os_hvp.items():
      os_hvp[os_name] = {}
      for hv_name, hv_params in hv_dict.items():
        if hv_name in cluster.enabled_hypervisors:
          os_hvp[os_name][hv_name] = hv_params

    # Convert ip_family to ip_version
    primary_ip_version = constants.IP4_VERSION
    if cluster.primary_ip_family == netutils.IP6Address.family:
      primary_ip_version = constants.IP6_VERSION

    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.enabled_hypervisors[0],
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
      "osparams": cluster.osparams,
      "nicparams": cluster.nicparams,
      "ndparams": cluster.ndparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
      "file_storage_dir": cluster.file_storage_dir,
      "shared_file_storage_dir": cluster.shared_file_storage_dir,
      "maintain_node_health": cluster.maintain_node_health,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      "uid_pool": cluster.uid_pool,
      "default_iallocator": cluster.default_iallocator,
      "reserved_lvs": cluster.reserved_lvs,
      "primary_ip_version": primary_ip_version,
      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
      "hidden_os": cluster.hidden_os,
      "blacklisted_os": cluster.blacklisted_os,
      }

    return result


class LUClusterConfigQuery(NoHooksLU):
  """Return configuration values.

  """
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
                                  "watcher_pause", "volume_group_name")

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Return the values of the requested cluster config fields.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      elif field == "watcher_pause":
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
      elif field == "volume_group_name":
        entry = self.cfg.GetVGName()
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUInstanceActivateDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
              _AssembleInstanceDisks(self, self.instance,
                                     ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type disks: list of L{objects.Disk} or None
  @param disks: which disks to assemble (or all, if None)
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  disks = _ExpandCheckDisks(instance, disks)

  # With the two-pass mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for idx, inst_disk in enumerate(disks):
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for idx, inst_disk in enumerate(disks):
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


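# Illustrative sketch (hypothetical values): on success the function returns
# (True, device_info) where device_info holds one entry per disk, e.g.
#   [("node1.example.com", "disk/0", "/dev/drbd0")]
# i.e. (primary node, instance-visible name, node device path); dev_path may
# be None when the primary-pass assembly failed for that disk.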
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUInstanceDeactivateDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    if self.op.force:
      _ShutdownInstanceDisks(self, instance)
    else:
      _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance, disks=None):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
  _ShutdownInstanceDisks(lu, instance, disks=disks)


def _ExpandCheckDisks(instance, disks):
  """Return the instance disks selected by the disks list.

  @type disks: list of L{objects.Disk} or None
  @param disks: selected disks
  @rtype: list of L{objects.Disk}
  @return: selected instance disks to act on

  """
  if disks is None:
    return instance.disks
  else:
    if not set(disks).issubset(instance.disks):
      raise errors.ProgrammerError("Can only act on disks belonging to the"
                                   " target instance")
    return disks


def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored;
  otherwise they cause the function to report a failure.

  """
  all_result = True
  disks = _ExpandCheckDisks(instance, disks)

  for disk in disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if ((node == instance.primary_node and not ignore_primary) or
            (node != instance.primary_node and not result.offline)):
          all_result = False
  return all_result


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
  nodeinfo[node].Raise("Can't get data from node %s" % node,
                       prereq=True, ecode=errors.ECODE_ENVIRON)
  free_mem = nodeinfo[node].payload.get("memory_free", None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem),
                               errors.ECODE_NORES)


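# Illustrative usage (hedged): callers typically pass the memory requirement
# straight from the instance's backend parameters, e.g.
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        bep[constants.BE_MEMORY], instance.hypervisor)
# which raises OpPrereqError(ECODE_NORES) when "memory_free" is too small.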
def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
  """Checks if nodes have enough free disk space in all the VGs.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type req_sizes: C{dict}
  @param req_sizes: the hash of vg and corresponding amount of disk in
      MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  for vg, req_size in req_sizes.items():
    _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)


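# Illustrative sketch (assumed values): req_sizes maps volume-group names to
# the total space needed in that VG, e.g.
#   _CheckNodesFreeDiskPerVG(self, ["node1", "node2"], {"xenvg": 20480})
# which simply fans out to one _CheckNodesFreeDiskOnVG call per VG below.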
def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
5574
  """Checks if nodes have enough free disk space in the specified VG.
5575

5576
  This function check if all given nodes have the needed amount of
5577
  free disk. In case any node has less disk or we cannot get the
5578
  information from the node, this function raise an OpPrereqError
5579
  exception.
5580

5581
  @type lu: C{LogicalUnit}
5582
  @param lu: a logical unit from which we get configuration data
5583
  @type nodenames: C{list}
5584
  @param nodenames: the list of node names to check
5585
  @type vg: C{str}
5586
  @param vg: the volume group to check
5587
  @type requested: C{int}
5588
  @param requested: the amount of disk in MiB to check for
5589
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
5590
      or we cannot check the node
5591

5592
  """
5593
  nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
5594
  for node in nodenames:
5595
    info = nodeinfo[node]
5596
    info.Raise("Cannot get current information from node %s" % node,
5597
               prereq=True, ecode=errors.ECODE_ENVIRON)
5598
    vg_free = info.payload.get("vg_free", None)
5599
    if not isinstance(vg_free, int):
5600
      raise errors.OpPrereqError("Can't compute free disk space on node"
5601
                                 " %s for vg %s, result was '%s'" %
5602
                                 (node, vg, vg_free), errors.ECODE_ENVIRON)
5603
    if requested > vg_free:
5604
      raise errors.OpPrereqError("Not enough disk space on target node %s"
5605
                                 " vg %s: required %d MiB, available %d MiB" %
5606
                                 (node, vg, requested, vg_free),
5607
                                 errors.ECODE_NORES)


class LUInstanceStartup(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    # extra beparams
    if self.op.beparams:
      # fill the beparams dict
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }

    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra hvparams
    if self.op.hvparams:
      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
      filled_hvp = cluster.FillHV(instance)
      filled_hvp.update(self.op.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)

    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.proc.LogWarning("Ignoring offline primary node")

      if self.op.hvparams or self.op.beparams:
        self.proc.LogWarning("Overridden parameters are ignored")
    else:
      _CheckNodeOnline(self, instance.primary_node)

      bep = self.cfg.GetClusterInfo().FillBE(instance)

      # check bridges existence
      _CheckInstanceBridgesExist(self, instance)

      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node,
                        prereq=True, ecode=errors.ECODE_ENVIRON)
      if not remote_info.payload: # not running already
        _CheckNodeFreeMemory(self, instance.primary_node,
                             "starting instance %s" % instance.name,
                             bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    if not self.op.no_remember:
      self.cfg.MarkInstanceUp(instance.name)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.proc.LogInfo("Primary node offline, marked instance as started")
    else:
      node_current = instance.primary_node

      _StartInstanceDisks(self, instance, force)

      result = self.rpc.call_instance_start(node_current, instance,
                                            self.op.hvparams, self.op.beparams,
                                            self.op.startup_paused)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance: %s" % msg)


class LUInstanceReboot(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }

    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node)
    instance_running = bool(remote_info.payload)

    node_current = instance.primary_node

    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                                            constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type,
                                             self.op.shutdown_timeout)
      result.Raise("Could not reboot instance")
    else:
      if instance_running:
        result = self.rpc.call_instance_shutdown(node_current, instance,
                                                 self.op.shutdown_timeout)
        result.Raise("Could not shutdown instance for full reboot")
        _ShutdownInstanceDisks(self, instance)
      else:
        self.LogInfo("Instance %s was already stopped, starting now",
                     instance.name)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance,
                                            None, None, False)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)
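
  # Summary of the branches in Exec() above: soft and hard reboots are
  # delegated to the hypervisor through call_instance_reboot, while a full
  # reboot (or a reboot requested for an instance that is not running) is
  # implemented as a shutdown/start cycle with the instance disks deactivated
  # and reactivated in between.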
5813

    
5814

    
5815
class LUInstanceShutdown(LogicalUnit):
5816
  """Shutdown an instance.
5817

5818
  """
5819
  HPATH = "instance-stop"
5820
  HTYPE = constants.HTYPE_INSTANCE
5821
  REQ_BGL = False
5822

    
5823
  def ExpandNames(self):
5824
    self._ExpandAndLockInstance()
5825

    
5826
  def BuildHooksEnv(self):
5827
    """Build hooks env.
5828

5829
    This runs on master, primary and secondary nodes of the instance.
5830

5831
    """
5832
    env = _BuildInstanceHookEnvByObject(self, self.instance)
5833
    env["TIMEOUT"] = self.op.timeout
5834
    return env
5835

    
5836
  def BuildHooksNodes(self):
5837
    """Build hooks nodes.
5838

5839
    """
5840
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5841
    return (nl, nl)
5842

    
5843
  def CheckPrereq(self):
5844
    """Check prerequisites.
5845

5846
    This checks that the instance is in the cluster.
5847

5848
    """
5849
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5850
    assert self.instance is not None, \
5851
      "Cannot retrieve locked instance %s" % self.op.instance_name
5852

    
5853
    self.primary_offline = \
5854
      self.cfg.GetNodeInfo(self.instance.primary_node).offline
5855

    
5856
    if self.primary_offline and self.op.ignore_offline_nodes:
5857
      self.proc.LogWarning("Ignoring offline primary node")
5858
    else:
5859
      _CheckNodeOnline(self, self.instance.primary_node)
5860

    
5861
  def Exec(self, feedback_fn):
5862
    """Shutdown the instance.
5863

5864
    """
5865
    instance = self.instance
5866
    node_current = instance.primary_node
5867
    timeout = self.op.timeout
5868

    
5869
    if not self.op.no_remember:
5870
      self.cfg.MarkInstanceDown(instance.name)
5871

    
5872
    if self.primary_offline:
5873
      assert self.op.ignore_offline_nodes
5874
      self.proc.LogInfo("Primary node offline, marked instance as stopped")
5875
    else:
5876
      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
5877
      msg = result.fail_msg
5878
      if msg:
5879
        self.proc.LogWarning("Could not shutdown instance: %s" % msg)
5880

    
5881
      _ShutdownInstanceDisks(self, instance)
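
  # Note on the no_remember flag used above: when it is set, the shutdown (or
  # the startup in LUInstanceStartup) is performed without updating the
  # instance's recorded admin state, so components that act on the recorded
  # state still see the previous desired value.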
5882

    
5883

    
5884
class LUInstanceReinstall(LogicalUnit):
5885
  """Reinstall an instance.
5886

5887
  """
5888
  HPATH = "instance-reinstall"
5889
  HTYPE = constants.HTYPE_INSTANCE
5890
  REQ_BGL = False
5891

    
5892
  def ExpandNames(self):
5893
    self._ExpandAndLockInstance()
5894

    
5895
  def BuildHooksEnv(self):
5896
    """Build hooks env.
5897

5898
    This runs on master, primary and secondary nodes of the instance.
5899

5900
    """
5901
    return _BuildInstanceHookEnvByObject(self, self.instance)
5902

    
5903
  def BuildHooksNodes(self):
5904
    """Build hooks nodes.
5905

5906
    """
5907
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5908
    return (nl, nl)
5909

    
5910
  def CheckPrereq(self):
5911
    """Check prerequisites.
5912

5913
    This checks that the instance is in the cluster and is not running.
5914

5915
    """
5916
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5917
    assert instance is not None, \
5918
      "Cannot retrieve locked instance %s" % self.op.instance_name
5919
    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
5920
                     " offline, cannot reinstall")
5921
    for node in instance.secondary_nodes:
5922
      _CheckNodeOnline(self, node, "Instance secondary node offline,"
5923
                       " cannot reinstall")
5924

    
5925
    if instance.disk_template == constants.DT_DISKLESS:
5926
      raise errors.OpPrereqError("Instance '%s' has no disks" %
5927
                                 self.op.instance_name,
5928
                                 errors.ECODE_INVAL)
5929
    _CheckInstanceDown(self, instance, "cannot reinstall")
5930

    
5931
    if self.op.os_type is not None:
5932
      # OS verification
5933
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
5934
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
5935
      instance_os = self.op.os_type
5936
    else:
5937
      instance_os = instance.os
5938

    
5939
    nodelist = list(instance.all_nodes)
5940

    
5941
    if self.op.osparams:
5942
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
5943
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
5944
      self.os_inst = i_osdict # the new dict (without defaults)
5945
    else:
5946
      self.os_inst = None
5947

    
5948
    self.instance = instance
5949

    
5950
  def Exec(self, feedback_fn):
5951
    """Reinstall the instance.
5952

5953
    """
5954
    inst = self.instance
5955

    
5956
    if self.op.os_type is not None:
5957
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
5958
      inst.os = self.op.os_type
5959
      # Write to configuration
5960
      self.cfg.Update(inst, feedback_fn)
5961

    
5962
    _StartInstanceDisks(self, inst, None)
5963
    try:
5964
      feedback_fn("Running the instance OS create scripts...")
5965
      # FIXME: pass debug option from opcode to backend
5966
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
5967
                                             self.op.debug_level,
5968
                                             osparams=self.os_inst)
5969
      result.Raise("Could not install OS for instance %s on node %s" %
5970
                   (inst.name, inst.primary_node))
5971
    finally:
5972
      _ShutdownInstanceDisks(self, inst)
5973

    
5974

    
5975
class LUInstanceRecreateDisks(LogicalUnit):
5976
  """Recreate an instance's missing disks.
5977

5978
  """
5979
  HPATH = "instance-recreate-disks"
5980
  HTYPE = constants.HTYPE_INSTANCE
5981
  REQ_BGL = False
5982

    
5983
  def CheckArguments(self):
5984
    # normalise the disk list
5985
    self.op.disks = sorted(frozenset(self.op.disks))
5986

    
5987
  def ExpandNames(self):
5988
    self._ExpandAndLockInstance()
5989
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5990
    if self.op.nodes:
5991
      self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
5992
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
5993
    else:
5994
      self.needed_locks[locking.LEVEL_NODE] = []
5995

    
5996
  def DeclareLocks(self, level):
5997
    if level == locking.LEVEL_NODE:
5998
      # if we replace the nodes, we only need to lock the old primary,
5999
      # otherwise we need to lock all nodes for disk re-creation
6000
      primary_only = bool(self.op.nodes)
6001
      self._LockInstancesNodes(primary_only=primary_only)
6002

    
6003
  def BuildHooksEnv(self):
6004
    """Build hooks env.
6005

6006
    This runs on master, primary and secondary nodes of the instance.
6007

6008
    """
6009
    return _BuildInstanceHookEnvByObject(self, self.instance)
6010

    
6011
  def BuildHooksNodes(self):
6012
    """Build hooks nodes.
6013

6014
    """
6015
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6016
    return (nl, nl)
6017

    
6018
  def CheckPrereq(self):
6019
    """Check prerequisites.
6020

6021
    This checks that the instance is in the cluster and is not running.
6022

6023
    """
6024
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6025
    assert instance is not None, \
6026
      "Cannot retrieve locked instance %s" % self.op.instance_name
6027
    if self.op.nodes:
6028
      if len(self.op.nodes) != len(instance.all_nodes):
6029
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
6030
                                   " %d replacement nodes were specified" %
6031
                                   (instance.name, len(instance.all_nodes),
6032
                                    len(self.op.nodes)),
6033
                                   errors.ECODE_INVAL)
6034
      assert instance.disk_template != constants.DT_DRBD8 or \
6035
          len(self.op.nodes) == 2
6036
      assert instance.disk_template != constants.DT_PLAIN or \
6037
          len(self.op.nodes) == 1
6038
      primary_node = self.op.nodes[0]
6039
    else:
6040
      primary_node = instance.primary_node
6041
    _CheckNodeOnline(self, primary_node)
6042

    
6043
    if instance.disk_template == constants.DT_DISKLESS:
6044
      raise errors.OpPrereqError("Instance '%s' has no disks" %
6045
                                 self.op.instance_name, errors.ECODE_INVAL)
6046
    # if we replace nodes *and* the old primary is offline, we don't
6047
    # check
6048
    assert instance.primary_node in self.needed_locks[locking.LEVEL_NODE]
6049
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
6050
    if not (self.op.nodes and old_pnode.offline):
6051
      _CheckInstanceDown(self, instance, "cannot recreate disks")
6052

    
6053
    if not self.op.disks:
6054
      self.op.disks = range(len(instance.disks))
6055
    else:
6056
      for idx in self.op.disks:
6057
        if idx >= len(instance.disks):
6058
          raise errors.OpPrereqError("Invalid disk index '%s'" % idx,
6059
                                     errors.ECODE_INVAL)
6060
    if self.op.disks != range(len(instance.disks)) and self.op.nodes:
6061
      raise errors.OpPrereqError("Can't recreate disks partially and"
6062
                                 " change the nodes at the same time",
6063
                                 errors.ECODE_INVAL)
6064
    self.instance = instance
6065

    
6066
  def Exec(self, feedback_fn):
6067
    """Recreate the disks.
6068

6069
    """
6070
    instance = self.instance
6071

    
6072
    to_skip = []
6073
    mods = [] # keeps track of needed logical_id changes
6074

    
6075
    for idx, disk in enumerate(instance.disks):
6076
      if idx not in self.op.disks: # disk idx has not been passed in
6077
        to_skip.append(idx)
6078
        continue
6079
      # update secondaries for disks, if needed
6080
      if self.op.nodes:
6081
        if disk.dev_type == constants.LD_DRBD8:
6082
          # need to update the nodes and minors
6083
          assert len(self.op.nodes) == 2
6084
          assert len(disk.logical_id) == 6 # otherwise disk internals
6085
                                           # have changed
6086
          (_, _, old_port, _, _, old_secret) = disk.logical_id
6087
          new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
6088
          new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
6089
                    new_minors[0], new_minors[1], old_secret)
6090
          assert len(disk.logical_id) == len(new_id)
6091
          mods.append((idx, new_id))
6092

    
6093
    # now that we have passed all asserts above, we can apply the mods
6094
    # in a single run (to avoid partial changes)
6095
    for idx, new_id in mods:
6096
      instance.disks[idx].logical_id = new_id
6097

    
6098
    # change primary node, if needed
6099
    if self.op.nodes:
6100
      instance.primary_node = self.op.nodes[0]
6101
      self.LogWarning("Changing the instance's nodes, you will have to"
6102
                      " remove any disks left on the older nodes manually")
6103

    
6104
    if self.op.nodes:
6105
      self.cfg.Update(instance, feedback_fn)
6106

    
6107
    _CreateDisks(self, instance, to_skip=to_skip)
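
  # For reference (describing the tuple handling in Exec() above): a DRBD8
  # disk's logical_id is the 6-tuple
  #   (primary_node, secondary_node, port, primary_minor, secondary_minor,
  #    secret)
  # which is why only the port and the shared secret are preserved while the
  # node names and minors are re-allocated for the replacement nodes.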


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    _CheckNodeOnline(self, instance.primary_node)
    _CheckInstanceDown(self, instance, "cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = netutils.GetHostname(name=new_name)
      if hostname != new_name:
        self.LogInfo("Resolved given name '%s' to '%s'", new_name,
                     hostname.name)
      if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
        raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                    " same as given hostname '%s'") %
                                    (hostname.name, self.op.new_name),
                                    errors.ECODE_INVAL)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)

    return inst.name

class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  if not _RemoveDisks(lu, instance):
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance %s out of cluster config", instance.name)

  lu.cfg.RemoveInstance(instance.name)

  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
    "Instance lock removal conflict"

  # Remove lock for the instance
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name

class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                             self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)

class LUInstanceFailover(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.iallocator = getattr(self.op, "iallocator", None)
    self.target_node = getattr(self.op, "target_node", None)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.target_node is not None:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    ignore_consistency = self.op.ignore_consistency
    shutdown_timeout = self.op.shutdown_timeout
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       cleanup=False,
                                       failover=True,
                                       ignore_consistency=ignore_consistency,
                                       shutdown_timeout=shutdown_timeout)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
      if instance.disk_template in constants.DTS_EXT_MIRROR:
        if self.op.target_node is None:
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        else:
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                   self.op.target_node]
        del self.recalculate_locks[locking.LEVEL_NODE]
      else:
        self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = self.op.target_node
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      "OLD_PRIMARY": source_node,
      "NEW_PRIMARY": target_node,
      }

    if instance.disk_template in constants.DTS_INT_MIRROR:
      env["OLD_SECONDARY"] = instance.secondary_nodes[0]
      env["NEW_SECONDARY"] = source_node
    else:
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""

    env.update(_BuildInstanceHookEnvByObject(self, instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self._migrater.instance
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    return (nl, nl + [instance.primary_node])

class LUInstanceMigrate(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.target_node is not None:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       cleanup=self.op.cleanup,
                                       failover=False,
                                       fallback=self.op.allow_failover)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
      if instance.disk_template in constants.DTS_EXT_MIRROR:
        if self.op.target_node is None:
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        else:
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                   self.op.target_node]
        del self.recalculate_locks[locking.LEVEL_NODE]
      else:
        self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = self.op.target_node
    env = _BuildInstanceHookEnvByObject(self, instance)
    env.update({
      "MIGRATE_LIVE": self._migrater.live,
      "MIGRATE_CLEANUP": self.op.cleanup,
      "OLD_PRIMARY": source_node,
      "NEW_PRIMARY": target_node,
      })

    if instance.disk_template in constants.DTS_INT_MIRROR:
      env["OLD_SECONDARY"] = target_node
      env["NEW_SECONDARY"] = source_node
    else:
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self._migrater.instance
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    return (nl, nl + [instance.primary_node])

class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    _CheckNodeVmCapable(self, target_node)

    if instance.admin_up:
      # check memory requirements on the target node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, disk,
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, disk,
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node, instance,
                                            None, None, False)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))

class LUNodeMigrate(LogicalUnit):
  """Migrate all instances from a node.

  """
  HPATH = "node-migrate"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False

  def CheckArguments(self):
    pass

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    return {
      "NODE_NAME": self.op.node_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    return (nl, nl)

  def CheckPrereq(self):
    pass

  def Exec(self, feedback_fn):
    # Prepare migration jobs for the node's primary instances
    jobs = [
      [opcodes.OpInstanceMigrate(instance_name=inst.name,
                                 mode=self.op.mode,
                                 live=self.op.live,
                                 iallocator=self.op.iallocator,
                                 target_node=self.op.target_node)]
      for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
      ]

    # TODO: Run iallocator in this opcode and pass correct placement options to
    # OpInstanceMigrate. Since other jobs can modify the cluster between
    # running the iallocator and the actual migration, a good consistency model
    # will have to be found.

    assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
            frozenset([self.op.node_name]))

    return ResultWithJobs(jobs)
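
  # Sketch of the behaviour above: instead of migrating the instances inline,
  # Exec() returns one single-opcode job per primary instance on the node
  # (wrapped in ResultWithJobs), so each migration runs as its own job and can
  # succeed or fail independently of the others.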


class TLMigrateInstance(Tasklet):
  """Tasklet class for instance migration.

  @type live: boolean
  @ivar live: whether the migration will be done live or non-live;
      this variable is initialized only after CheckPrereq has run
  @type cleanup: boolean
  @ivar cleanup: Whether we clean up from a failed migration
  @type iallocator: string
  @ivar iallocator: The iallocator used to determine target_node
  @type target_node: string
  @ivar target_node: If given, the target_node to reallocate the instance to
  @type failover: boolean
  @ivar failover: Whether operation results in failover or migration
  @type fallback: boolean
  @ivar fallback: Whether fallback to failover is allowed if migration not
                  possible
  @type ignore_consistency: boolean
  @ivar ignore_consistency: Whether we should ignore consistency between source
                            and target node
  @type shutdown_timeout: int
  @ivar shutdown_timeout: In case of failover, timeout of the shutdown

  """
  def __init__(self, lu, instance_name, cleanup=False,
               failover=False, fallback=False,
               ignore_consistency=False,
               shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.cleanup = cleanup
    self.live = False # will be overridden later
    self.failover = failover
    self.fallback = fallback
    self.ignore_consistency = ignore_consistency
    self.shutdown_timeout = shutdown_timeout

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
    instance = self.cfg.GetInstanceInfo(instance_name)
    assert instance is not None
    self.instance = instance

    if (not self.cleanup and not instance.admin_up and not self.failover and
        self.fallback):
      self.lu.LogInfo("Instance is marked down, fallback allowed, switching"
                      " to failover")
      self.failover = True

    if instance.disk_template not in constants.DTS_MIRRORED:
      if self.failover:
        text = "failovers"
      else:
        text = "migrations"
      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
                                 " %s" % (instance.disk_template, text),
                                 errors.ECODE_STATE)

    if instance.disk_template in constants.DTS_EXT_MIRROR:
      _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")

      if self.lu.op.iallocator:
        self._RunAllocator()
      else:
        # We set self.target_node as it is required by
        # BuildHooksEnv
        self.target_node = self.lu.op.target_node

      # self.target_node is already populated, either directly or by the
      # iallocator run
      target_node = self.target_node
      if self.target_node == instance.primary_node:
        raise errors.OpPrereqError("Cannot migrate instance %s"
                                   " to its primary (%s)" %
                                   (instance.name, instance.primary_node))

      if len(self.lu.tasklets) == 1:
        # It is safe to release locks only when we're the only tasklet
        # in the LU
        _ReleaseLocks(self.lu, locking.LEVEL_NODE,
                      keep=[instance.primary_node, self.target_node])

    else:
      secondary_nodes = instance.secondary_nodes
      if not secondary_nodes:
        raise errors.ConfigurationError("No secondary node but using"
                                        " %s disk template" %
                                        instance.disk_template)
      target_node = secondary_nodes[0]
      if self.lu.op.iallocator or (self.lu.op.target_node and
                                   self.lu.op.target_node != target_node):
        if self.failover:
          text = "failed over"
        else:
          text = "migrated"
        raise errors.OpPrereqError("Instances with disk template %s cannot"
                                   " be %s to arbitrary nodes"
                                   " (neither an iallocator nor a target"
                                   " node can be passed)" %
                                   (instance.disk_template, text),
                                   errors.ECODE_INVAL)

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    # check memory requirements on the secondary node
    if not self.failover or instance.admin_up:
      _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
                           instance.name, i_be[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.lu.LogInfo("Not checking memory on the secondary node as"
                      " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)

    if not self.cleanup:
      _CheckNodeNotDrained(self.lu, target_node)
      if not self.failover:
        result = self.rpc.call_instance_migratable(instance.primary_node,
                                                   instance)
        if result.fail_msg and self.fallback:
          self.lu.LogInfo("Can't migrate, instance offline, fallback to"
                          " failover")
          self.failover = True
        else:
          result.Raise("Can't migrate, please use failover",
                       prereq=True, ecode=errors.ECODE_STATE)

    assert not (self.failover and self.cleanup)

    if not self.failover:
      if self.lu.op.live is not None and self.lu.op.mode is not None:
        raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
                                   " parameters are accepted",
                                   errors.ECODE_INVAL)
      if self.lu.op.live is not None:
        if self.lu.op.live:
          self.lu.op.mode = constants.HT_MIGRATION_LIVE
        else:
          self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
        # reset the 'live' parameter to None so that repeated
        # invocations of CheckPrereq do not raise an exception
        self.lu.op.live = None
      elif self.lu.op.mode is None:
        # read the default value from the hypervisor
        i_hv = self.cfg.GetClusterInfo().FillHV(self.instance,
                                                skip_globals=False)
        self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]

      self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
    else:
      # Failover is never live
      self.live = False
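
  # Note on the live/mode resolution at the end of CheckPrereq() above: the
  # boolean 'live' opcode parameter is translated into the 'mode' parameter
  # (HT_MIGRATION_LIVE or HT_MIGRATION_NONLIVE); if neither was given, the
  # hypervisor's HV_MIGRATION_MODE default is used, and failovers always end
  # up with self.live = False.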

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    ial = IAllocator(self.cfg, self.rpc,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.instance_name,
                     # TODO See why hail breaks with a single node below
                     relocate_from=[self.instance.primary_node,
                                    self.instance.primary_node],
                     )

    ial.Run(self.lu.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.lu.op.iallocator, ial.info),
                                 errors.ECODE_NORES)
    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.lu.op.iallocator, len(ial.result),
                                  ial.required_nodes), errors.ECODE_FAULT)
    self.target_node = ial.result[0]
    self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.instance_name, self.lu.op.iallocator,
                 utils.CommaJoin(ial.result))

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)
6995

    
6996
  def _ExecCleanup(self):
6997
    """Try to cleanup after a failed migration.
6998

6999
    The cleanup is done by:
7000
      - check that the instance is running only on one node
7001
        (and update the config if needed)
7002
      - change disks on its secondary node to secondary
7003
      - wait until disks are fully synchronized
7004
      - disconnect from the network
7005
      - change disks into single-master mode
7006
      - wait again until disks are fully synchronized
7007

7008
    """
7009
    instance = self.instance
7010
    target_node = self.target_node
7011
    source_node = self.source_node
7012

    
7013
    # check running on only one node
7014
    self.feedback_fn("* checking where the instance actually runs"
7015
                     " (if this hangs, the hypervisor might be in"
7016
                     " a bad state)")
7017
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
7018
    for node, result in ins_l.items():
7019
      result.Raise("Can't contact node %s" % node)
7020

    
7021
    runningon_source = instance.name in ins_l[source_node].payload
7022
    runningon_target = instance.name in ins_l[target_node].payload
7023

    
7024
    if runningon_source and runningon_target:
7025
      raise errors.OpExecError("Instance seems to be running on two nodes,"
7026
                               " or the hypervisor is confused; you will have"
7027
                               " to ensure manually that it runs only on one"
7028
                               " and restart this operation")
7029

    
7030
    if not (runningon_source or runningon_target):
7031
      raise errors.OpExecError("Instance does not seem to be running at all;"
7032
                               " in this case it's safer to repair by"
7033
                               " running 'gnt-instance stop' to ensure disk"
7034
                               " shutdown, and then restarting it")
7035

    
7036
    if runningon_target:
7037
      # the migration has actually succeeded, we need to update the config
7038
      self.feedback_fn("* instance running on secondary node (%s),"
7039
                       " updating config" % target_node)
7040
      instance.primary_node = target_node
7041
      self.cfg.Update(instance, self.feedback_fn)
7042
      demoted_node = source_node
7043
    else:
7044
      self.feedback_fn("* instance confirmed to be running on its"
7045
                       " primary node (%s)" % source_node)
7046
      demoted_node = target_node
7047

    
7048
    if instance.disk_template in constants.DTS_INT_MIRROR:
7049
      self._EnsureSecondary(demoted_node)
7050
      try:
7051
        self._WaitUntilSync()
7052
      except errors.OpExecError:
7053
        # we ignore errors here, since if the device is standalone, it
7054
        # won't be able to sync
7055
        pass
7056
      self._GoStandalone()
7057
      self._GoReconnect(False)
7058
      self._WaitUntilSync()
7059

    
7060
    self.feedback_fn("* done")
7061

    
7062
  def _RevertDiskStatus(self):
7063
    """Try to revert the disk status after a failed migration.
7064

7065
    """
7066
    target_node = self.target_node
7067
    if self.instance.disk_template in constants.DTS_EXT_MIRROR:
7068
      return
7069

    
7070
    try:
7071
      self._EnsureSecondary(target_node)
7072
      self._GoStandalone()
7073
      self._GoReconnect(False)
7074
      self._WaitUntilSync()
7075
    except errors.OpExecError, err:
7076
      self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
7077
                         " please try to recover the instance manually;"
7078
                         " error '%s'" % str(err))
7079

    
7080
  def _AbortMigration(self):
7081
    """Call the hypervisor code to abort a started migration.
7082

7083
    """
7084
    instance = self.instance
7085
    target_node = self.target_node
7086
    migration_info = self.migration_info
7087

    
7088
    abort_result = self.rpc.call_finalize_migration(target_node,
7089
                                                    instance,
7090
                                                    migration_info,
7091
                                                    False)
7092
    abort_msg = abort_result.fail_msg
7093
    if abort_msg:
7094
      logging.error("Aborting migration failed on target node %s: %s",
7095
                    target_node, abort_msg)
7096
      # Don't raise an exception here, as we still have to try to revert the
7097
      # disk status, even if this step failed.
7098

    
7099
  def _ExecMigration(self):
7100
    """Migrate an instance.
7101

7102
    The migrate is done by:
7103
      - change the disks into dual-master mode
7104
      - wait until disks are fully synchronized again
7105
      - migrate the instance
7106
      - change disks on the new secondary node (the old primary) to secondary
7107
      - wait until disks are fully synchronized
7108
      - change disks into single-master mode
7109

7110
    """
7111
    instance = self.instance
7112
    target_node = self.target_node
7113
    source_node = self.source_node
7114

    
7115
    self.feedback_fn("* checking disk consistency between source and target")
7116
    for dev in instance.disks:
7117
      if not _CheckDiskConsistency(self.lu, dev, target_node, False):
7118
        raise errors.OpExecError("Disk %s is degraded or not fully"
7119
                                 " synchronized on target node,"
7120
                                 " aborting migration" % dev.iv_name)
7121

    
7122
    # First get the migration information from the remote node
7123
    result = self.rpc.call_migration_info(source_node, instance)
7124
    msg = result.fail_msg
7125
    if msg:
7126
      log_err = ("Failed fetching source migration information from %s: %s" %
7127
                 (source_node, msg))
7128
      logging.error(log_err)
7129
      raise errors.OpExecError(log_err)
7130

    
7131
    self.migration_info = migration_info = result.payload
7132

    
7133
    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
7134
      # Then switch the disks to master/master mode
7135
      self._EnsureSecondary(target_node)
7136
      self._GoStandalone()
7137
      self._GoReconnect(True)
7138
      self._WaitUntilSync()
7139

    
7140
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
7141
    result = self.rpc.call_accept_instance(target_node,
7142
                                           instance,
7143
                                           migration_info,
7144
                                           self.nodes_ip[target_node])
7145

    
7146
    msg = result.fail_msg
7147
    if msg:
7148
      logging.error("Instance pre-migration failed, trying to revert"
7149
                    " disk status: %s", msg)
7150
      self.feedback_fn("Pre-migration failed, aborting")
7151
      self._AbortMigration()
7152
      self._RevertDiskStatus()
7153
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
7154
                               (instance.name, msg))
7155

    
7156
    self.feedback_fn("* migrating instance to %s" % target_node)
7157
    result = self.rpc.call_instance_migrate(source_node, instance,
7158
                                            self.nodes_ip[target_node],
7159
                                            self.live)
7160
    msg = result.fail_msg
7161
    if msg:
7162
      logging.error("Instance migration failed, trying to revert"
7163
                    " disk status: %s", msg)
7164
      self.feedback_fn("Migration failed, aborting")
7165
      self._AbortMigration()
7166
      self._RevertDiskStatus()
7167
      raise errors.OpExecError("Could not migrate instance %s: %s" %
7168
                               (instance.name, msg))
7169

    
7170
    instance.primary_node = target_node
7171
    # distribute new instance config to the other nodes
7172
    self.cfg.Update(instance, self.feedback_fn)
7173

    
7174
    result = self.rpc.call_finalize_migration(target_node,
7175
                                              instance,
7176
                                              migration_info,
7177
                                              True)
7178
    msg = result.fail_msg
7179
    if msg:
7180
      logging.error("Instance migration succeeded, but finalization failed:"
7181
                    " %s", msg)
7182
      raise errors.OpExecError("Could not finalize instance migration: %s" %
7183
                               msg)
7184

    
7185
    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
7186
      self._EnsureSecondary(source_node)
7187
      self._WaitUntilSync()
7188
      self._GoStandalone()
7189
      self._GoReconnect(False)
7190
      self._WaitUntilSync()
7191

    
7192
    self.feedback_fn("* done")
7193

    
7194
  def _ExecFailover(self):
7195
    """Failover an instance.
7196

7197
    The failover is done by shutting it down on its present node and
7198
    starting it on the secondary.
7199

7200
    """
7201
    instance = self.instance
7202
    primary_node = self.cfg.GetNodeInfo(instance.primary_node)
7203

    
7204
    source_node = instance.primary_node
7205
    target_node = self.target_node
7206

    
7207
    if instance.admin_up:
7208
      self.feedback_fn("* checking disk consistency between source and target")
7209
      for dev in instance.disks:
7210
        # for drbd, these are drbd over lvm
7211
        if not _CheckDiskConsistency(self.lu, dev, target_node, False):
7212
          if primary_node.offline:
7213
            self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
7214
                             " target node %s" %
7215
                             (primary_node.name, dev.iv_name, target_node))
7216
          elif not self.ignore_consistency:
7217
            raise errors.OpExecError("Disk %s is degraded on target node,"
7218
                                     " aborting failover" % dev.iv_name)
7219
    else:
7220
      self.feedback_fn("* not checking disk consistency as instance is not"
7221
                       " running")
7222

    
7223
    self.feedback_fn("* shutting down instance on source node")
7224
    logging.info("Shutting down instance %s on node %s",
7225
                 instance.name, source_node)
7226

    
7227
    result = self.rpc.call_instance_shutdown(source_node, instance,
7228
                                             self.shutdown_timeout)
7229
    msg = result.fail_msg
7230
    if msg:
7231
      if self.ignore_consistency or primary_node.offline:
7232
        self.lu.LogWarning("Could not shutdown instance %s on node %s,"
7233
                           " proceeding anyway; please make sure node"
7234
                           " %s is down; error details: %s",
7235
                           instance.name, source_node, source_node, msg)
7236
      else:
7237
        raise errors.OpExecError("Could not shutdown instance %s on"
7238
                                 " node %s: %s" %
7239
                                 (instance.name, source_node, msg))
7240

    
7241
    self.feedback_fn("* deactivating the instance's disks on source node")
7242
    if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
7243
      raise errors.OpExecError("Can't shut down the instance's disks")
7244

    
7245
    instance.primary_node = target_node
7246
    # distribute new instance config to the other nodes
7247
    self.cfg.Update(instance, self.feedback_fn)
7248

    
7249
    # Only start the instance if it's marked as up
7250
    if instance.admin_up:
7251
      self.feedback_fn("* activating the instance's disks on target node %s" %
7252
                       target_node)
7253
      logging.info("Starting instance %s on node %s",
7254
                   instance.name, target_node)
7255

    
7256
      disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
7257
                                           ignore_secondaries=True)
7258
      if not disks_ok:
7259
        _ShutdownInstanceDisks(self.lu, instance)
7260
        raise errors.OpExecError("Can't activate the instance's disks")
7261

    
7262
      self.feedback_fn("* starting the instance on the target node %s" %
7263
                       target_node)
7264
      result = self.rpc.call_instance_start(target_node, instance, None, None,
7265
                                            False)
7266
      msg = result.fail_msg
7267
      if msg:
7268
        _ShutdownInstanceDisks(self.lu, instance)
7269
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
7270
                                 (instance.name, target_node, msg))
7271

    
7272
  def Exec(self, feedback_fn):
7273
    """Perform the migration.
7274

7275
    """
7276
    self.feedback_fn = feedback_fn
7277
    self.source_node = self.instance.primary_node
7278

    
7279
    # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
7280
    if self.instance.disk_template in constants.DTS_INT_MIRROR:
7281
      self.target_node = self.instance.secondary_nodes[0]
7282
      # Otherwise self.target_node has been populated either
7283
      # directly, or through an iallocator.
7284

    
7285
    self.all_nodes = [self.source_node, self.target_node]
7286
    self.nodes_ip = {
7287
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
7288
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
7289
      }
7290

    
7291
    if self.failover:
7292
      feedback_fn("Failover instance %s" % self.instance.name)
7293
      self._ExecFailover()
7294
    else:
7295
      feedback_fn("Migrating instance %s" % self.instance.name)
7296

    
7297
      if self.cleanup:
7298
        return self._ExecCleanup()
7299
      else:
7300
        return self._ExecMigration()
7301

    
7302

    
7303
def _CreateBlockDev(lu, node, instance, device, force_create,
7304
                    info, force_open):
7305
  """Create a tree of block devices on a given node.
7306

7307
  If this device type has to be created on secondaries, create it and
7308
  all its children.
7309

7310
  If not, just recurse to children keeping the same 'force' value.
7311

7312
  @param lu: the lu on whose behalf we execute
7313
  @param node: the node on which to create the device
7314
  @type instance: L{objects.Instance}
7315
  @param instance: the instance which owns the device
7316
  @type device: L{objects.Disk}
7317
  @param device: the device to create
7318
  @type force_create: boolean
7319
  @param force_create: whether to force creation of this device; this
7320
      will be changed to True whenever we find a device which has
7321
      CreateOnSecondary() attribute
7322
  @param info: the extra 'metadata' we should attach to the device
7323
      (this will be represented as a LVM tag)
7324
  @type force_open: boolean
7325
  @param force_open: this parameter will be passed to the
7326
      L{backend.BlockdevCreate} function where it specifies
7327
      whether we run on primary or not, and it affects both
7328
      the child assembly and the device's own Open() execution
7329

7330
  """
7331
  if device.CreateOnSecondary():
7332
    force_create = True
7333

    
7334
  if device.children:
7335
    for child in device.children:
7336
      _CreateBlockDev(lu, node, instance, child, force_create,
7337
                      info, force_open)
7338

    
7339
  if not force_create:
7340
    return
7341

    
7342
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
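
# Illustrative note (editor's addition, not part of the original code): the
# recursion above is depth-first, so for a disk with children (e.g. a DRBD8
# device on top of a data and a metadata LV) the children are created before
# the device itself, and force_create is switched on for the whole subtree as
# soon as a device reports CreateOnSecondary().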
7343

    
7344

    
7345
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
7346
  """Create a single block device on a given node.
7347

7348
  This will not recurse over children of the device, so they must be
7349
  created in advance.
7350

7351
  @param lu: the lu on whose behalf we execute
7352
  @param node: the node on which to create the device
7353
  @type instance: L{objects.Instance}
7354
  @param instance: the instance which owns the device
7355
  @type device: L{objects.Disk}
7356
  @param device: the device to create
7357
  @param info: the extra 'metadata' we should attach to the device
7358
      (this will be represented as a LVM tag)
7359
  @type force_open: boolean
7360
  @param force_open: this parameter will be passed to the
7361
      L{backend.BlockdevCreate} function where it specifies
7362
      whether we run on primary or not, and it affects both
7363
      the child assembly and the device's own Open() execution
7364

7365
  """
7366
  lu.cfg.SetDiskID(device, node)
7367
  result = lu.rpc.call_blockdev_create(node, device, device.size,
7368
                                       instance.name, force_open, info)
7369
  result.Raise("Can't create block device %s on"
7370
               " node %s for instance %s" % (device, node, instance.name))
7371
  if device.physical_id is None:
7372
    device.physical_id = result.payload
7373

    
7374

    
7375
def _GenerateUniqueNames(lu, exts):
7376
  """Generate a suitable LV name.
7377

7378
  This will generate a logical volume name for the given instance.
7379

7380
  """
7381
  results = []
7382
  for val in exts:
7383
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
7384
    results.append("%s%s" % (new_id, val))
7385
  return results
7386

    
7387

    
7388
def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
7389
                         iv_name, p_minor, s_minor):
7390
  """Generate a drbd8 device complete with its children.
7391

7392
  """
7393
  assert len(vgnames) == len(names) == 2
7394
  port = lu.cfg.AllocatePort()
7395
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
7396
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
7397
                          logical_id=(vgnames[0], names[0]))
7398
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
7399
                          logical_id=(vgnames[1], names[1]))
7400
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
7401
                          logical_id=(primary, secondary, port,
7402
                                      p_minor, s_minor,
7403
                                      shared_secret),
7404
                          children=[dev_data, dev_meta],
7405
                          iv_name=iv_name)
7406
  return drbd_dev
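
# Illustrative sketch (editor's addition, not part of the original code): the
# object tree returned for a single DRBD8 disk looks roughly like
#
#   Disk(LD_DRBD8, size=S,
#        logical_id=(primary, secondary, port, p_minor, s_minor, secret),
#        children=[Disk(LD_LV, size=S, logical_id=(vgnames[0], names[0])),
#                  Disk(LD_LV, size=128, logical_id=(vgnames[1], names[1]))])
#
# i.e. a fixed 128 MB metadata LV is always paired with the data LV.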
7407

    
7408

    
7409
def _GenerateDiskTemplate(lu, template_name,
7410
                          instance_name, primary_node,
7411
                          secondary_nodes, disk_info,
7412
                          file_storage_dir, file_driver,
7413
                          base_index, feedback_fn):
7414
  """Generate the entire disk layout for a given template type.
7415

7416
  """
7417
  #TODO: compute space requirements
7418

    
7419
  vgname = lu.cfg.GetVGName()
7420
  disk_count = len(disk_info)
7421
  disks = []
7422
  if template_name == constants.DT_DISKLESS:
7423
    pass
7424
  elif template_name == constants.DT_PLAIN:
7425
    if len(secondary_nodes) != 0:
7426
      raise errors.ProgrammerError("Wrong template configuration")
7427

    
7428
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
7429
                                      for i in range(disk_count)])
7430
    for idx, disk in enumerate(disk_info):
7431
      disk_index = idx + base_index
7432
      vg = disk.get(constants.IDISK_VG, vgname)
7433
      feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
7434
      disk_dev = objects.Disk(dev_type=constants.LD_LV,
7435
                              size=disk[constants.IDISK_SIZE],
7436
                              logical_id=(vg, names[idx]),
7437
                              iv_name="disk/%d" % disk_index,
7438
                              mode=disk[constants.IDISK_MODE])
7439
      disks.append(disk_dev)
7440
  elif template_name == constants.DT_DRBD8:
7441
    if len(secondary_nodes) != 1:
7442
      raise errors.ProgrammerError("Wrong template configuration")
7443
    remote_node = secondary_nodes[0]
7444
    minors = lu.cfg.AllocateDRBDMinor(
7445
      [primary_node, remote_node] * len(disk_info), instance_name)
7446

    
7447
    names = []
7448
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
7449
                                               for i in range(disk_count)]):
7450
      names.append(lv_prefix + "_data")
7451
      names.append(lv_prefix + "_meta")
7452
    for idx, disk in enumerate(disk_info):
7453
      disk_index = idx + base_index
7454
      data_vg = disk.get(constants.IDISK_VG, vgname)
7455
      meta_vg = disk.get(constants.IDISK_METAVG, data_vg)
7456
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
7457
                                      disk[constants.IDISK_SIZE],
7458
                                      [data_vg, meta_vg],
7459
                                      names[idx * 2:idx * 2 + 2],
7460
                                      "disk/%d" % disk_index,
7461
                                      minors[idx * 2], minors[idx * 2 + 1])
7462
      disk_dev.mode = disk[constants.IDISK_MODE]
7463
      disks.append(disk_dev)
7464
  elif template_name == constants.DT_FILE:
7465
    if len(secondary_nodes) != 0:
7466
      raise errors.ProgrammerError("Wrong template configuration")
7467

    
7468
    opcodes.RequireFileStorage()
7469

    
7470
    for idx, disk in enumerate(disk_info):
7471
      disk_index = idx + base_index
7472
      disk_dev = objects.Disk(dev_type=constants.LD_FILE,
7473
                              size=disk[constants.IDISK_SIZE],
7474
                              iv_name="disk/%d" % disk_index,
7475
                              logical_id=(file_driver,
7476
                                          "%s/disk%d" % (file_storage_dir,
7477
                                                         disk_index)),
7478
                              mode=disk[constants.IDISK_MODE])
7479
      disks.append(disk_dev)
7480
  elif template_name == constants.DT_SHARED_FILE:
7481
    if len(secondary_nodes) != 0:
7482
      raise errors.ProgrammerError("Wrong template configuration")
7483

    
7484
    opcodes.RequireSharedFileStorage()
7485

    
7486
    for idx, disk in enumerate(disk_info):
7487
      disk_index = idx + base_index
7488
      disk_dev = objects.Disk(dev_type=constants.LD_FILE,
7489
                              size=disk[constants.IDISK_SIZE],
7490
                              iv_name="disk/%d" % disk_index,
7491
                              logical_id=(file_driver,
7492
                                          "%s/disk%d" % (file_storage_dir,
7493
                                                         disk_index)),
7494
                              mode=disk[constants.IDISK_MODE])
7495
      disks.append(disk_dev)
7496
  elif template_name == constants.DT_BLOCK:
7497
    if len(secondary_nodes) != 0:
7498
      raise errors.ProgrammerError("Wrong template configuration")
7499

    
7500
    for idx, disk in enumerate(disk_info):
7501
      disk_index = idx + base_index
7502
      disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV,
7503
                              size=disk[constants.IDISK_SIZE],
7504
                              logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
7505
                                          disk[constants.IDISK_ADOPT]),
7506
                              iv_name="disk/%d" % disk_index,
7507
                              mode=disk[constants.IDISK_MODE])
7508
      disks.append(disk_dev)
7509

    
7510
  else:
7511
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
7512
  return disks
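
# Illustrative example (editor's addition, not part of the original code): for
# template_name == constants.DT_PLAIN, base_index == 0 and two disks of 1024
# and 2048 MB, the returned list holds two LD_LV Disk objects with iv_name
# "disk/0" and "disk/1" and logical_id (<vg>, "<unique name>.disk0") and
# (<vg>, "<unique name>.disk1"), without any children.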
7513

    
7514

    
7515
def _GetInstanceInfoText(instance):
7516
  """Compute that text that should be added to the disk's metadata.
7517

7518
  """
7519
  return "originstname+%s" % instance.name


def _CalcEta(time_taken, written, total_size):
7523
  """Calculates the ETA based on size written and total size.
7524

7525
  @param time_taken: The time taken so far
7526
  @param written: amount written so far
7527
  @param total_size: The total size of data to be written
7528
  @return: The remaining time in seconds
7529

7530
  """
7531
  avg_time = time_taken / float(written)
7532
  return (total_size - written) * avg_time
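
# Illustrative worked example (editor's addition, not part of the original
# code): after writing 512 of 2048 units in 8 seconds, avg_time is
# 8 / 512.0 == 0.015625 and the ETA is (2048 - 512) * 0.015625 == 24.0, so
# _CalcEta(8, 512, 2048) == 24.0 seconds.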


def _WipeDisks(lu, instance):
7536
  """Wipes instance disks.
7537

7538
  @type lu: L{LogicalUnit}
7539
  @param lu: the logical unit on whose behalf we execute
7540
  @type instance: L{objects.Instance}
7541
  @param instance: the instance whose disks we should create
7542
  @return: the success of the wipe
7543

7544
  """
7545
  node = instance.primary_node
7546

    
7547
  for device in instance.disks:
7548
    lu.cfg.SetDiskID(device, node)
7549

    
7550
  logging.info("Pause sync of instance %s disks", instance.name)
7551
  result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
7552

    
7553
  for idx, success in enumerate(result.payload):
7554
    if not success:
7555
      logging.warn("pause-sync of instance %s for disks %d failed",
7556
                   instance.name, idx)
7557

    
7558
  try:
7559
    for idx, device in enumerate(instance.disks):
7560
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
7561
      # MAX_WIPE_CHUNK at max
7562
      wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
7563
                            constants.MIN_WIPE_CHUNK_PERCENT)
7564
      # we _must_ make this an int, otherwise rounding errors will
7565
      # occur
7566
      wipe_chunk_size = int(wipe_chunk_size)
7567

    
7568
      lu.LogInfo("* Wiping disk %d", idx)
7569
      logging.info("Wiping disk %d for instance %s, node %s using"
7570
                   " chunk size %s", idx, instance.name, node, wipe_chunk_size)
7571

    
7572
      offset = 0
7573
      size = device.size
7574
      last_output = 0
7575
      start_time = time.time()
7576

    
7577
      while offset < size:
7578
        wipe_size = min(wipe_chunk_size, size - offset)
7579
        logging.debug("Wiping disk %d, offset %s, chunk %s",
7580
                      idx, offset, wipe_size)
7581
        result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
7582
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
7583
                     (idx, offset, wipe_size))
7584
        now = time.time()
7585
        offset += wipe_size
7586
        if now - last_output >= 60:
7587
          eta = _CalcEta(now - start_time, offset, size)
7588
          lu.LogInfo(" - done: %.1f%% ETA: %s" %
7589
                     (offset / float(size) * 100, utils.FormatSeconds(eta)))
7590
          last_output = now
7591
  finally:
7592
    logging.info("Resume sync of instance %s disks", instance.name)
7593

    
7594
    result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
7595

    
7596
    for idx, success in enumerate(result.payload):
7597
      if not success:
7598
        lu.LogWarning("Resume sync of disk %d failed, please have a"
7599
                      " look at the status and troubleshoot the issue", idx)
7600
        logging.warn("resume-sync of instance %s for disks %d failed",
7601
                     instance.name, idx)
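
# Illustrative worked example (editor's addition; the concrete constant values
# are an assumption): with MIN_WIPE_CHUNK_PERCENT == 10 and MAX_WIPE_CHUNK ==
# 1024 MB, a 5120 MB disk is wiped in chunks of min(1024, 5120 / 100.0 * 10)
# == 512 MB, while disks larger than 10240 MB are capped at 1024 MB per chunk.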
7602

    
7603

    
7604
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
7605
  """Create all disks for an instance.
7606

7607
  This abstracts away some work from AddInstance.
7608

7609
  @type lu: L{LogicalUnit}
7610
  @param lu: the logical unit on whose behalf we execute
7611
  @type instance: L{objects.Instance}
7612
  @param instance: the instance whose disks we should create
7613
  @type to_skip: list
7614
  @param to_skip: list of indices to skip
7615
  @type target_node: string
7616
  @param target_node: if passed, overrides the target node for creation
7617
  @rtype: boolean
7618
  @return: the success of the creation
7619

7620
  """
7621
  info = _GetInstanceInfoText(instance)
7622
  if target_node is None:
7623
    pnode = instance.primary_node
7624
    all_nodes = instance.all_nodes
7625
  else:
7626
    pnode = target_node
7627
    all_nodes = [pnode]
7628

    
7629
  if instance.disk_template in constants.DTS_FILEBASED:
7630
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
7631
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
7632

    
7633
    result.Raise("Failed to create directory '%s' on"
7634
                 " node %s" % (file_storage_dir, pnode))
7635

    
7636
  # Note: this needs to be kept in sync with adding of disks in
7637
  # LUInstanceSetParams
7638
  for idx, device in enumerate(instance.disks):
7639
    if to_skip and idx in to_skip:
7640
      continue
7641
    logging.info("Creating volume %s for instance %s",
7642
                 device.iv_name, instance.name)
7643
    #HARDCODE
7644
    for node in all_nodes:
7645
      f_create = node == pnode
7646
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
7647

    
7648

    
7649
def _RemoveDisks(lu, instance, target_node=None):
7650
  """Remove all disks for an instance.
7651

7652
  This abstracts away some work from `AddInstance()` and
7653
  `RemoveInstance()`. Note that in case some of the devices couldn't
7654
  be removed, the removal will continue with the other ones (compare
7655
  with `_CreateDisks()`).
7656

7657
  @type lu: L{LogicalUnit}
7658
  @param lu: the logical unit on whose behalf we execute
7659
  @type instance: L{objects.Instance}
7660
  @param instance: the instance whose disks we should remove
7661
  @type target_node: string
7662
  @param target_node: used to override the node on which to remove the disks
7663
  @rtype: boolean
7664
  @return: the success of the removal
7665

7666
  """
7667
  logging.info("Removing block devices for instance %s", instance.name)
7668

    
7669
  all_result = True
7670
  for device in instance.disks:
7671
    if target_node:
7672
      edata = [(target_node, device)]
7673
    else:
7674
      edata = device.ComputeNodeTree(instance.primary_node)
7675
    for node, disk in edata:
7676
      lu.cfg.SetDiskID(disk, node)
7677
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
7678
      if msg:
7679
        lu.LogWarning("Could not remove block device %s on node %s,"
7680
                      " continuing anyway: %s", device.iv_name, node, msg)
7681
        all_result = False
7682

    
7683
  if instance.disk_template == constants.DT_FILE:
7684
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
7685
    if target_node:
7686
      tgt = target_node
7687
    else:
7688
      tgt = instance.primary_node
7689
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
7690
    if result.fail_msg:
7691
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
7692
                    file_storage_dir, instance.primary_node, result.fail_msg)
7693
      all_result = False
7694

    
7695
  return all_result
7696

    
7697

    
7698
def _ComputeDiskSizePerVG(disk_template, disks):
7699
  """Compute disk size requirements in the volume group
7700

7701
  """
7702
  def _compute(disks, payload):
7703
    """Universal algorithm.
7704

7705
    """
7706
    vgs = {}
7707
    for disk in disks:
7708
      vg_name = disk[constants.IDISK_VG]
      vgs[vg_name] = \
        vgs.get(vg_name, 0) + disk[constants.IDISK_SIZE] + payload
7710

    
7711
    return vgs
7712

    
7713
  # Required free disk space as a function of disk and swap space
7714
  req_size_dict = {
7715
    constants.DT_DISKLESS: {},
7716
    constants.DT_PLAIN: _compute(disks, 0),
7717
    # 128 MB are added for drbd metadata for each disk
7718
    constants.DT_DRBD8: _compute(disks, 128),
7719
    constants.DT_FILE: {},
7720
    constants.DT_SHARED_FILE: {},
7721
  }
7722

    
7723
  if disk_template not in req_size_dict:
7724
    raise errors.ProgrammerError("Disk template '%s' size requirement"
7725
                                 " is unknown" %  disk_template)
7726

    
7727
  return req_size_dict[disk_template]
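
# Illustrative worked example (editor's addition, not part of the original
# code): for disk_template == constants.DT_DRBD8 and
#   disks == [{"vg": "xenvg", "size": 1024}, {"vg": "xenvg", "size": 2048}]
# the result is {"xenvg": 3328}, i.e. both disk sizes plus 128 MB of DRBD
# metadata per disk, accumulated per volume group.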


def _ComputeDiskSize(disk_template, disks):
7731
  """Compute disk size requirements in the volume group
7732

7733
  """
7734
  # Required free disk space as a function of disk and swap space
7735
  req_size_dict = {
7736
    constants.DT_DISKLESS: None,
7737
    constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
7738
    # 128 MB are added for drbd metadata for each disk
7739
    constants.DT_DRBD8: sum(d[constants.IDISK_SIZE] + 128 for d in disks),
7740
    constants.DT_FILE: None,
7741
    constants.DT_SHARED_FILE: 0,
7742
    constants.DT_BLOCK: 0,
7743
  }
7744

    
7745
  if disk_template not in req_size_dict:
7746
    raise errors.ProgrammerError("Disk template '%s' size requirement"
7747
                                 " is unknown" %  disk_template)
7748

    
7749
  return req_size_dict[disk_template]
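
# Illustrative worked example (editor's addition, not part of the original
# code): for constants.DT_DRBD8 and two disks of 1024 and 2048 MB the
# required space is (1024 + 128) + (2048 + 128) == 3328 MB, while diskless
# and file-based instances need no space in the volume group at all.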
7750

    
7751

    
7752
def _FilterVmNodes(lu, nodenames):
7753
  """Filters out non-vm_capable nodes from a list.
7754

7755
  @type lu: L{LogicalUnit}
7756
  @param lu: the logical unit for which we check
7757
  @type nodenames: list
7758
  @param nodenames: the list of nodes on which we should check
7759
  @rtype: list
7760
  @return: the list of vm-capable nodes
7761

7762
  """
7763
  vm_incapable = frozenset(lu.cfg.GetNonVmCapableNodeList())
  return [name for name in nodenames if name not in vm_incapable]
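
# Illustrative example (editor's addition, not part of the original code):
# with nodenames == ["node1", "node2", "node3"] and only "node2" marked as
# not vm_capable, the function returns ["node1", "node3"].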
7765

    
7766

    
7767
def _CheckHVParams(lu, nodenames, hvname, hvparams):
7768
  """Hypervisor parameter validation.
7769

7770
  This function abstract the hypervisor parameter validation to be
7771
  used in both instance create and instance modify.
7772

7773
  @type lu: L{LogicalUnit}
7774
  @param lu: the logical unit for which we check
7775
  @type nodenames: list
7776
  @param nodenames: the list of nodes on which we should check
7777
  @type hvname: string
7778
  @param hvname: the name of the hypervisor we should use
7779
  @type hvparams: dict
7780
  @param hvparams: the parameters which we need to check
7781
  @raise errors.OpPrereqError: if the parameters are not valid
7782

7783
  """
7784
  nodenames = _FilterVmNodes(lu, nodenames)
7785
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
7786
                                                  hvname,
7787
                                                  hvparams)
7788
  for node in nodenames:
7789
    info = hvinfo[node]
7790
    if info.offline:
7791
      continue
7792
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
7793

    
7794

    
7795
def _CheckOSParams(lu, required, nodenames, osname, osparams):
7796
  """OS parameters validation.
7797

7798
  @type lu: L{LogicalUnit}
7799
  @param lu: the logical unit for which we check
7800
  @type required: boolean
7801
  @param required: whether the validation should fail if the OS is not
7802
      found
7803
  @type nodenames: list
7804
  @param nodenames: the list of nodes on which we should check
7805
  @type osname: string
7806
  @param osname: the name of the hypervisor we should use
7807
  @type osparams: dict
7808
  @param osparams: the parameters which we need to check
7809
  @raise errors.OpPrereqError: if the parameters are not valid
7810

7811
  """
7812
  nodenames = _FilterVmNodes(lu, nodenames)
7813
  result = lu.rpc.call_os_validate(required, nodenames, osname,
7814
                                   [constants.OS_VALIDATE_PARAMETERS],
7815
                                   osparams)
7816
  for node, nres in result.items():
7817
    # we don't check for offline cases since this should be run only
7818
    # against the master node and/or an instance's nodes
7819
    nres.Raise("OS Parameters validation failed on node %s" % node)
7820
    if not nres.payload:
7821
      lu.LogInfo("OS %s not found on node %s, validation skipped",
7822
                 osname, node)
7823

    
7824

    
7825
class LUInstanceCreate(LogicalUnit):
7826
  """Create an instance.
7827

7828
  """
7829
  HPATH = "instance-add"
7830
  HTYPE = constants.HTYPE_INSTANCE
7831
  REQ_BGL = False
7832

    
7833
  def CheckArguments(self):
7834
    """Check arguments.
7835

7836
    """
7837
    # do not require name_check to ease forward/backward compatibility
7838
    # for tools
7839
    if self.op.no_install and self.op.start:
7840
      self.LogInfo("No-installation mode selected, disabling startup")
7841
      self.op.start = False
7842
    # validate/normalize the instance name
7843
    self.op.instance_name = \
7844
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
7845

    
7846
    if self.op.ip_check and not self.op.name_check:
7847
      # TODO: make the ip check more flexible and not depend on the name check
7848
      raise errors.OpPrereqError("Cannot do IP address check without a name"
7849
                                 " check", errors.ECODE_INVAL)
7850

    
7851
    # check nics' parameter names
7852
    for nic in self.op.nics:
7853
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
7854

    
7855
    # check disks. parameter names and consistent adopt/no-adopt strategy
7856
    has_adopt = has_no_adopt = False
7857
    for disk in self.op.disks:
7858
      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
7859
      if constants.IDISK_ADOPT in disk:
7860
        has_adopt = True
7861
      else:
7862
        has_no_adopt = True
7863
    if has_adopt and has_no_adopt:
7864
      raise errors.OpPrereqError("Either all disks are adopted or none is",
7865
                                 errors.ECODE_INVAL)
7866
    if has_adopt:
7867
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
7868
        raise errors.OpPrereqError("Disk adoption is not supported for the"
7869
                                   " '%s' disk template" %
7870
                                   self.op.disk_template,
7871
                                   errors.ECODE_INVAL)
7872
      if self.op.iallocator is not None:
7873
        raise errors.OpPrereqError("Disk adoption not allowed with an"
7874
                                   " iallocator script", errors.ECODE_INVAL)
7875
      if self.op.mode == constants.INSTANCE_IMPORT:
7876
        raise errors.OpPrereqError("Disk adoption not allowed for"
7877
                                   " instance import", errors.ECODE_INVAL)
7878
    else:
7879
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
7880
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
7881
                                   " but no 'adopt' parameter given" %
7882
                                   self.op.disk_template,
7883
                                   errors.ECODE_INVAL)
7884

    
7885
    self.adopt_disks = has_adopt
7886

    
7887
    # instance name verification
7888
    if self.op.name_check:
7889
      self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
7890
      self.op.instance_name = self.hostname1.name
7891
      # used in CheckPrereq for ip ping check
7892
      self.check_ip = self.hostname1.ip
7893
    else:
7894
      self.check_ip = None
7895

    
7896
    # file storage checks
7897
    if (self.op.file_driver and
7898
        not self.op.file_driver in constants.FILE_DRIVER):
7899
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
7900
                                 self.op.file_driver, errors.ECODE_INVAL)
7901

    
7902
    if self.op.disk_template == constants.DT_FILE:
7903
      opcodes.RequireFileStorage()
7904
    elif self.op.disk_template == constants.DT_SHARED_FILE:
7905
      opcodes.RequireSharedFileStorage()
7906

    
7907
    ### Node/iallocator related checks
7908
    _CheckIAllocatorOrNode(self, "iallocator", "pnode")
7909

    
7910
    if self.op.pnode is not None:
7911
      if self.op.disk_template in constants.DTS_INT_MIRROR:
7912
        if self.op.snode is None:
7913
          raise errors.OpPrereqError("The networked disk templates need"
7914
                                     " a mirror node", errors.ECODE_INVAL)
7915
      elif self.op.snode:
7916
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
7917
                        " template")
7918
        self.op.snode = None
7919

    
7920
    self._cds = _GetClusterDomainSecret()
7921

    
7922
    if self.op.mode == constants.INSTANCE_IMPORT:
7923
      # On import force_variant must be True, because if we forced it at
7924
      # initial install, our only chance when importing it back is that it
7925
      # works again!
7926
      self.op.force_variant = True
7927

    
7928
      if self.op.no_install:
7929
        self.LogInfo("No-installation mode has no effect during import")
7930

    
7931
    elif self.op.mode == constants.INSTANCE_CREATE:
7932
      if self.op.os_type is None:
7933
        raise errors.OpPrereqError("No guest OS specified",
7934
                                   errors.ECODE_INVAL)
7935
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
7936
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
7937
                                   " installation" % self.op.os_type,
7938
                                   errors.ECODE_STATE)
7939
      if self.op.disk_template is None:
7940
        raise errors.OpPrereqError("No disk template specified",
7941
                                   errors.ECODE_INVAL)
7942

    
7943
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
7944
      # Check handshake to ensure both clusters have the same domain secret
7945
      src_handshake = self.op.source_handshake
7946
      if not src_handshake:
7947
        raise errors.OpPrereqError("Missing source handshake",
7948
                                   errors.ECODE_INVAL)
7949

    
7950
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
7951
                                                           src_handshake)
7952
      if errmsg:
7953
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
7954
                                   errors.ECODE_INVAL)
7955

    
7956
      # Load and check source CA
7957
      self.source_x509_ca_pem = self.op.source_x509_ca
7958
      if not self.source_x509_ca_pem:
7959
        raise errors.OpPrereqError("Missing source X509 CA",
7960
                                   errors.ECODE_INVAL)
7961

    
7962
      try:
7963
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
7964
                                                    self._cds)
7965
      except OpenSSL.crypto.Error, err:
7966
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
7967
                                   (err, ), errors.ECODE_INVAL)
7968

    
7969
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
7970
      if errcode is not None:
7971
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
7972
                                   errors.ECODE_INVAL)
7973

    
7974
      self.source_x509_ca = cert
7975

    
7976
      src_instance_name = self.op.source_instance_name
7977
      if not src_instance_name:
7978
        raise errors.OpPrereqError("Missing source instance name",
7979
                                   errors.ECODE_INVAL)
7980

    
7981
      self.source_instance_name = \
7982
          netutils.GetHostname(name=src_instance_name).name
7983

    
7984
    else:
7985
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
7986
                                 self.op.mode, errors.ECODE_INVAL)
7987

    
7988
  def ExpandNames(self):
7989
    """ExpandNames for CreateInstance.
7990

7991
    Figure out the right locks for instance creation.
7992

7993
    """
7994
    self.needed_locks = {}
7995

    
7996
    instance_name = self.op.instance_name
7997
    # this is just a preventive check, but someone might still add this
7998
    # instance in the meantime, and creation will fail at lock-add time
7999
    if instance_name in self.cfg.GetInstanceList():
8000
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
8001
                                 instance_name, errors.ECODE_EXISTS)
8002

    
8003
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
8004

    
8005
    if self.op.iallocator:
8006
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8007
    else:
8008
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
8009
      nodelist = [self.op.pnode]
8010
      if self.op.snode is not None:
8011
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
8012
        nodelist.append(self.op.snode)
8013
      self.needed_locks[locking.LEVEL_NODE] = nodelist
8014

    
8015
    # in case of import lock the source node too
8016
    if self.op.mode == constants.INSTANCE_IMPORT:
8017
      src_node = self.op.src_node
8018
      src_path = self.op.src_path
8019

    
8020
      if src_path is None:
8021
        self.op.src_path = src_path = self.op.instance_name
8022

    
8023
      if src_node is None:
8024
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8025
        self.op.src_node = None
8026
        if os.path.isabs(src_path):
8027
          raise errors.OpPrereqError("Importing an instance from an absolute"
8028
                                     " path requires a source node option",
8029
                                     errors.ECODE_INVAL)
8030
      else:
8031
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
8032
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
8033
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
8034
        if not os.path.isabs(src_path):
8035
          self.op.src_path = src_path = \
8036
            utils.PathJoin(constants.EXPORT_DIR, src_path)
8037

    
8038
  def _RunAllocator(self):
8039
    """Run the allocator based on input opcode.
8040

8041
    """
8042
    nics = [n.ToDict() for n in self.nics]
8043
    ial = IAllocator(self.cfg, self.rpc,
8044
                     mode=constants.IALLOCATOR_MODE_ALLOC,
8045
                     name=self.op.instance_name,
8046
                     disk_template=self.op.disk_template,
8047
                     tags=self.op.tags,
8048
                     os=self.op.os_type,
8049
                     vcpus=self.be_full[constants.BE_VCPUS],
8050
                     memory=self.be_full[constants.BE_MEMORY],
8051
                     disks=self.disks,
8052
                     nics=nics,
8053
                     hypervisor=self.op.hypervisor,
8054
                     )
8055

    
8056
    ial.Run(self.op.iallocator)
8057

    
8058
    if not ial.success:
8059
      raise errors.OpPrereqError("Can't compute nodes using"
8060
                                 " iallocator '%s': %s" %
8061
                                 (self.op.iallocator, ial.info),
8062
                                 errors.ECODE_NORES)
8063
    if len(ial.result) != ial.required_nodes:
8064
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
8065
                                 " of nodes (%s), required %s" %
8066
                                 (self.op.iallocator, len(ial.result),
8067
                                  ial.required_nodes), errors.ECODE_FAULT)
8068
    self.op.pnode = ial.result[0]
8069
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
8070
                 self.op.instance_name, self.op.iallocator,
8071
                 utils.CommaJoin(ial.result))
8072
    if ial.required_nodes == 2:
8073
      self.op.snode = ial.result[1]
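
  # Illustrative note (editor's addition; the exact layout is an assumption
  # based on the checks above): ial.result is a list of node names, e.g.
  #   ["node3.example.com", "node7.example.com"]
  # for a two-node (DRBD) request, where the first entry becomes the primary
  # and the second the secondary node.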
8074

    
8075
  def BuildHooksEnv(self):
8076
    """Build hooks env.
8077

8078
    This runs on master, primary and secondary nodes of the instance.
8079

8080
    """
8081
    env = {
8082
      "ADD_MODE": self.op.mode,
8083
      }
8084
    if self.op.mode == constants.INSTANCE_IMPORT:
8085
      env["SRC_NODE"] = self.op.src_node
8086
      env["SRC_PATH"] = self.op.src_path
8087
      env["SRC_IMAGES"] = self.src_images
8088

    
8089
    env.update(_BuildInstanceHookEnv(
8090
      name=self.op.instance_name,
8091
      primary_node=self.op.pnode,
8092
      secondary_nodes=self.secondaries,
8093
      status=self.op.start,
8094
      os_type=self.op.os_type,
8095
      memory=self.be_full[constants.BE_MEMORY],
8096
      vcpus=self.be_full[constants.BE_VCPUS],
8097
      nics=_NICListToTuple(self, self.nics),
8098
      disk_template=self.op.disk_template,
8099
      disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
8100
             for d in self.disks],
8101
      bep=self.be_full,
8102
      hvp=self.hv_full,
8103
      hypervisor_name=self.op.hypervisor,
8104
      tags=self.op.tags,
8105
    ))
8106

    
8107
    return env
8108

    
8109
  def BuildHooksNodes(self):
8110
    """Build hooks nodes.
8111

8112
    """
8113
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
8114
    return nl, nl
8115

    
8116
  def _ReadExportInfo(self):
8117
    """Reads the export information from disk.
8118

8119
    It will override the opcode source node and path with the actual
8120
    information, if these two were not specified before.
8121

8122
    @return: the export information
8123

8124
    """
8125
    assert self.op.mode == constants.INSTANCE_IMPORT
8126

    
8127
    src_node = self.op.src_node
8128
    src_path = self.op.src_path
8129

    
8130
    if src_node is None:
8131
      locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
8132
      exp_list = self.rpc.call_export_list(locked_nodes)
8133
      found = False
8134
      for node in exp_list:
8135
        if exp_list[node].fail_msg:
8136
          continue
8137
        if src_path in exp_list[node].payload:
8138
          found = True
8139
          self.op.src_node = src_node = node
8140
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
8141
                                                       src_path)
8142
          break
8143
      if not found:
8144
        raise errors.OpPrereqError("No export found for relative path %s" %
8145
                                    src_path, errors.ECODE_INVAL)
8146

    
8147
    _CheckNodeOnline(self, src_node)
8148
    result = self.rpc.call_export_info(src_node, src_path)
8149
    result.Raise("No export or invalid export found in dir %s" % src_path)
8150

    
8151
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
8152
    if not export_info.has_section(constants.INISECT_EXP):
8153
      raise errors.ProgrammerError("Corrupted export config",
8154
                                   errors.ECODE_ENVIRON)
8155

    
8156
    ei_version = export_info.get(constants.INISECT_EXP, "version")
8157
    if (int(ei_version) != constants.EXPORT_VERSION):
8158
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
8159
                                 (ei_version, constants.EXPORT_VERSION),
8160
                                 errors.ECODE_ENVIRON)
8161
    return export_info
8162

    
8163
  def _ReadExportParams(self, einfo):
8164
    """Use export parameters as defaults.
8165

8166
    In case the opcode doesn't specify (as in override) some instance
8167
    parameters, then try to use them from the export information, if
8168
    that declares them.
8169

8170
    """
8171
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
8172

    
8173
    if self.op.disk_template is None:
8174
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
8175
        self.op.disk_template = einfo.get(constants.INISECT_INS,
8176
                                          "disk_template")
8177
      else:
8178
        raise errors.OpPrereqError("No disk template specified and the export"
8179
                                   " is missing the disk_template information",
8180
                                   errors.ECODE_INVAL)
8181

    
8182
    if not self.op.disks:
8183
      if einfo.has_option(constants.INISECT_INS, "disk_count"):
8184
        disks = []
8185
        # TODO: import the disk iv_name too
8186
        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
8187
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
8188
          disks.append({constants.IDISK_SIZE: disk_sz})
8189
        self.op.disks = disks
8190
      else:
8191
        raise errors.OpPrereqError("No disk info specified and the export"
8192
                                   " is missing the disk information",
8193
                                   errors.ECODE_INVAL)
8194

    
8195
    if (not self.op.nics and
8196
        einfo.has_option(constants.INISECT_INS, "nic_count")):
8197
      nics = []
8198
      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
8199
        ndict = {}
8200
        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
8201
          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
8202
          ndict[name] = v
8203
        nics.append(ndict)
8204
      self.op.nics = nics
8205

    
8206
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
8207
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
8208

    
8209
    if (self.op.hypervisor is None and
8210
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
8211
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
8212

    
8213
    if einfo.has_section(constants.INISECT_HYP):
8214
      # use the export parameters but do not override the ones
8215
      # specified by the user
8216
      for name, value in einfo.items(constants.INISECT_HYP):
8217
        if name not in self.op.hvparams:
8218
          self.op.hvparams[name] = value
8219

    
8220
    if einfo.has_section(constants.INISECT_BEP):
8221
      # use the parameters, without overriding
8222
      for name, value in einfo.items(constants.INISECT_BEP):
8223
        if name not in self.op.beparams:
8224
          self.op.beparams[name] = value
8225
    else:
8226
      # try to read the parameters old style, from the main section
8227
      for name in constants.BES_PARAMETERS:
8228
        if (name not in self.op.beparams and
8229
            einfo.has_option(constants.INISECT_INS, name)):
8230
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
8231

    
8232
    if einfo.has_section(constants.INISECT_OSP):
8233
      # use the parameters, without overriding
8234
      for name, value in einfo.items(constants.INISECT_OSP):
8235
        if name not in self.op.osparams:
8236
          self.op.osparams[name] = value
8237

    
8238
  def _RevertToDefaults(self, cluster):
8239
    """Revert the instance parameters to the default values.
8240

8241
    """
8242
    # hvparams
8243
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
8244
    for name in self.op.hvparams.keys():
8245
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
8246
        del self.op.hvparams[name]
8247
    # beparams
8248
    be_defs = cluster.SimpleFillBE({})
8249
    for name in self.op.beparams.keys():
8250
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
8251
        del self.op.beparams[name]
8252
    # nic params
8253
    nic_defs = cluster.SimpleFillNIC({})
8254
    for nic in self.op.nics:
8255
      for name in constants.NICS_PARAMETERS:
8256
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
8257
          del nic[name]
8258
    # osparams
8259
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
8260
    for name in self.op.osparams.keys():
8261
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
8262
        del self.op.osparams[name]
8263

    
8264
  def _CalculateFileStorageDir(self):
8265
    """Calculate final instance file storage dir.
8266

8267
    """
8268
    # file storage dir calculation/check
8269
    self.instance_file_storage_dir = None
8270
    if self.op.disk_template in constants.DTS_FILEBASED:
8271
      # build the full file storage dir path
8272
      joinargs = []
8273

    
8274
      if self.op.disk_template == constants.DT_SHARED_FILE:
8275
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
8276
      else:
8277
        get_fsd_fn = self.cfg.GetFileStorageDir
8278

    
8279
      cfg_storagedir = get_fsd_fn()
8280
      if not cfg_storagedir:
8281
        raise errors.OpPrereqError("Cluster file storage dir not defined")
8282
      joinargs.append(cfg_storagedir)
8283

    
8284
      if self.op.file_storage_dir is not None:
8285
        joinargs.append(self.op.file_storage_dir)
8286

    
8287
      joinargs.append(self.op.instance_name)
8288

    
8289
      # pylint: disable-msg=W0142
8290
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
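
  # Illustrative worked example (editor's addition; all paths are
  # hypothetical): with a cluster file storage dir of
  # "/srv/ganeti/file-storage", op.file_storage_dir == "mydir" and an instance
  # named "inst1.example.com", the resulting directory is
  # "/srv/ganeti/file-storage/mydir/inst1.example.com".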
8291

    
8292
  def CheckPrereq(self):
8293
    """Check prerequisites.
8294

8295
    """
8296
    self._CalculateFileStorageDir()
8297

    
8298
    if self.op.mode == constants.INSTANCE_IMPORT:
8299
      export_info = self._ReadExportInfo()
8300
      self._ReadExportParams(export_info)
8301

    
8302
    if (not self.cfg.GetVGName() and
8303
        self.op.disk_template not in constants.DTS_NOT_LVM):
8304
      raise errors.OpPrereqError("Cluster does not support lvm-based"
8305
                                 " instances", errors.ECODE_STATE)
8306

    
8307
    if self.op.hypervisor is None:
8308
      self.op.hypervisor = self.cfg.GetHypervisorType()
8309

    
8310
    cluster = self.cfg.GetClusterInfo()
8311
    enabled_hvs = cluster.enabled_hypervisors
8312
    if self.op.hypervisor not in enabled_hvs:
8313
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
8314
                                 " cluster (%s)" % (self.op.hypervisor,
8315
                                  ",".join(enabled_hvs)),
8316
                                 errors.ECODE_STATE)
8317

    
8318
    # Check tag validity
8319
    for tag in self.op.tags:
8320
      objects.TaggableObject.ValidateTag(tag)
8321

    
8322
    # check hypervisor parameter syntax (locally)
8323
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
8324
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
8325
                                      self.op.hvparams)
8326
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
8327
    hv_type.CheckParameterSyntax(filled_hvp)
8328
    self.hv_full = filled_hvp
8329
    # check that we don't specify global parameters on an instance
8330
    _CheckGlobalHvParams(self.op.hvparams)
8331

    
8332
    # fill and remember the beparams dict
8333
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
8334
    self.be_full = cluster.SimpleFillBE(self.op.beparams)
8335

    
8336
    # build os parameters
8337
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
8338

    
8339
    # now that hvp/bep are in final format, let's reset to defaults,
8340
    # if told to do so
8341
    if self.op.identify_defaults:
8342
      self._RevertToDefaults(cluster)
8343

    
8344
    # NIC buildup
8345
    self.nics = []
8346
    for idx, nic in enumerate(self.op.nics):
8347
      nic_mode_req = nic.get(constants.INIC_MODE, None)
8348
      nic_mode = nic_mode_req
8349
      if nic_mode is None:
8350
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
8351

    
8352
      # in routed mode, for the first nic, the default ip is 'auto'
8353
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
8354
        default_ip_mode = constants.VALUE_AUTO
8355
      else:
8356
        default_ip_mode = constants.VALUE_NONE
8357

    
8358
      # ip validity checks
8359
      ip = nic.get(constants.INIC_IP, default_ip_mode)
8360
      if ip is None or ip.lower() == constants.VALUE_NONE:
8361
        nic_ip = None
8362
      elif ip.lower() == constants.VALUE_AUTO:
8363
        if not self.op.name_check:
8364
          raise errors.OpPrereqError("IP address set to auto but name checks"
8365
                                     " have been skipped",
8366
                                     errors.ECODE_INVAL)
8367
        nic_ip = self.hostname1.ip
8368
      else:
8369
        if not netutils.IPAddress.IsValid(ip):
8370
          raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
8371
                                     errors.ECODE_INVAL)
8372
        nic_ip = ip
8373

    
8374
      # TODO: check the ip address for uniqueness
8375
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
8376
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
8377
                                   errors.ECODE_INVAL)
8378

    
8379
      # MAC address verification
8380
      mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
8381
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8382
        mac = utils.NormalizeAndValidateMac(mac)
8383

    
8384
        try:
8385
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
8386
        except errors.ReservationError:
8387
          raise errors.OpPrereqError("MAC address %s already in use"
8388
                                     " in cluster" % mac,
8389
                                     errors.ECODE_NOTUNIQUE)
8390

    
8391
      #  Build nic parameters
8392
      link = nic.get(constants.INIC_LINK, None)
8393
      nicparams = {}
8394
      if nic_mode_req:
8395
        nicparams[constants.NIC_MODE] = nic_mode_req
8396
      if link:
8397
        nicparams[constants.NIC_LINK] = link
8398

    
8399
      check_params = cluster.SimpleFillNIC(nicparams)
8400
      objects.NIC.CheckParameterSyntax(check_params)
8401
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
8402

    
8403
    # disk checks/pre-build
8404
    default_vg = self.cfg.GetVGName()
8405
    self.disks = []
8406
    for disk in self.op.disks:
8407
      mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
8408
      if mode not in constants.DISK_ACCESS_SET:
8409
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
8410
                                   mode, errors.ECODE_INVAL)
8411
      size = disk.get(constants.IDISK_SIZE, None)
8412
      if size is None:
8413
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
8414
      try:
8415
        size = int(size)
8416
      except (TypeError, ValueError):
8417
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
8418
                                   errors.ECODE_INVAL)
8419

    
      data_vg = disk.get(constants.IDISK_VG, default_vg)
      new_disk = {
        constants.IDISK_SIZE: size,
        constants.IDISK_MODE: mode,
        constants.IDISK_VG: data_vg,
        constants.IDISK_METAVG: disk.get(constants.IDISK_METAVG, data_vg),
        }
      if constants.IDISK_ADOPT in disk:
        new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
      self.disks.append(new_disk)
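      # Illustrative resulting entry (constants shown by name, values made
      # up): {IDISK_SIZE: 10240, IDISK_MODE: "rw", IDISK_VG: "xenvg",
      # IDISK_METAVG: "xenvg"} for a 10 GiB read-write disk.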

    
8431
    if self.op.mode == constants.INSTANCE_IMPORT:
8432

    
8433
      # Check that the new instance doesn't have less disks than the export
8434
      instance_disks = len(self.disks)
8435
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
8436
      if instance_disks < export_disks:
8437
        raise errors.OpPrereqError("Not enough disks to import."
8438
                                   " (instance: %d, export: %d)" %
8439
                                   (instance_disks, export_disks),
8440
                                   errors.ECODE_INVAL)
8441

    
8442
      disk_images = []
8443
      for idx in range(export_disks):
8444
        option = "disk%d_dump" % idx
8445
        if export_info.has_option(constants.INISECT_INS, option):
8446
          # FIXME: are the old OSes, disk sizes, etc. useful?
8447
          export_name = export_info.get(constants.INISECT_INS, option)
8448
          image = utils.PathJoin(self.op.src_path, export_name)
8449
          disk_images.append(image)
8450
        else:
8451
          disk_images.append(False)
8452

    
8453
      self.src_images = disk_images
8454

    
8455
      old_name = export_info.get(constants.INISECT_INS, "name")
8456
      try:
8457
        exp_nic_count = export_info.getint(constants.INISECT_INS, "nic_count")
8458
      except (TypeError, ValueError), err:
8459
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
8460
                                   " an integer: %s" % str(err),
8461
                                   errors.ECODE_STATE)
8462
      if self.op.instance_name == old_name:
8463
        for idx, nic in enumerate(self.nics):
8464
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
8465
            nic_mac_ini = "nic%d_mac" % idx
8466
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
8467

    
8468
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
8469

    
8470
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
8471
    if self.op.ip_check:
8472
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
8473
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
8474
                                   (self.check_ip, self.op.instance_name),
8475
                                   errors.ECODE_NOTUNIQUE)
8476

    
8477
    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
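        # The generated address is built from the cluster's configured MAC
        # prefix plus random octets, e.g. "aa:00:00:12:34:56" with the
        # default prefix (example value only).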

    
8489
    #### allocator run
8490

    
8491
    if self.op.iallocator is not None:
8492
      self._RunAllocator()
8493

    
8494
    #### node related checks
8495

    
8496
    # check primary node
8497
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
8498
    assert self.pnode is not None, \
8499
      "Cannot retrieve locked node %s" % self.op.pnode
8500
    if pnode.offline:
8501
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
8502
                                 pnode.name, errors.ECODE_STATE)
8503
    if pnode.drained:
8504
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
8505
                                 pnode.name, errors.ECODE_STATE)
8506
    if not pnode.vm_capable:
8507
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
8508
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
8509

    
8510
    self.secondaries = []
8511

    
8512
    # mirror node verification
8513
    if self.op.disk_template in constants.DTS_INT_MIRROR:
8514
      if self.op.snode == pnode.name:
8515
        raise errors.OpPrereqError("The secondary node cannot be the"
8516
                                   " primary node", errors.ECODE_INVAL)
8517
      _CheckNodeOnline(self, self.op.snode)
8518
      _CheckNodeNotDrained(self, self.op.snode)
8519
      _CheckNodeVmCapable(self, self.op.snode)
8520
      self.secondaries.append(self.op.snode)
8521

    
8522
    nodenames = [pnode.name] + self.secondaries
8523

    
8524
    if not self.adopt_disks:
      # Check lv size requirements, if not adopting
      req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
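      # req_sizes maps each volume group name to the space needed in it (in
      # MiB), e.g. {"xenvg": 10368}; the figure is illustrative and includes
      # any per-template overhead such as DRBD metadata.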
      _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
8528

    
8529
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
8530
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
8531
                                disk[constants.IDISK_ADOPT])
8532
                     for disk in self.disks])
8533
      if len(all_lvs) != len(self.disks):
8534
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
8535
                                   errors.ECODE_INVAL)
8536
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; we need to ensure that other
          # calls to ReserveLV use the same syntax
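          # (lv_name is e.g. "xenvg/vol1" when adopting the LV "vol1" from
          # volume group "xenvg"; names for illustration only)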
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)
8544

    
8545
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
8546
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
8547

    
8548
      node_lvs = self.rpc.call_lv_list([pnode.name],
8549
                                       vg_names.payload.keys())[pnode.name]
8550
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
8551
      node_lvs = node_lvs.payload
8552

    
8553
      delta = all_lvs.difference(node_lvs.keys())
8554
      if delta:
8555
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
8556
                                   utils.CommaJoin(delta),
8557
                                   errors.ECODE_INVAL)
8558
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
8559
      if online_lvs:
8560
        raise errors.OpPrereqError("Online logical volumes found, cannot"
8561
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
8562
                                   errors.ECODE_STATE)
8563
      # update the size of disk based on what is found
8564
      for dsk in self.disks:
8565
        dsk[constants.IDISK_SIZE] = \
8566
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
8567
                                        dsk[constants.IDISK_ADOPT])][0]))
8568

    
8569
    elif self.op.disk_template == constants.DT_BLOCK:
8570
      # Normalize and de-duplicate device paths
8571
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
8572
                       for disk in self.disks])
8573
      if len(all_disks) != len(self.disks):
8574
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
8575
                                   errors.ECODE_INVAL)
8576
      baddisks = [d for d in all_disks
8577
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
8578
      if baddisks:
8579
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
8580
                                   " cannot be adopted" %
8581
                                   (", ".join(baddisks),
8582
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
8583
                                   errors.ECODE_INVAL)
8584

    
8585
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
8586
                                            list(all_disks))[pnode.name]
8587
      node_disks.Raise("Cannot get block device information from node %s" %
8588
                       pnode.name)
8589
      node_disks = node_disks.payload
8590
      delta = all_disks.difference(node_disks.keys())
8591
      if delta:
8592
        raise errors.OpPrereqError("Missing block device(s): %s" %
8593
                                   utils.CommaJoin(delta),
8594
                                   errors.ECODE_INVAL)
8595
      for dsk in self.disks:
8596
        dsk[constants.IDISK_SIZE] = \
8597
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
8598

    
8599
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
8600

    
8601
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
8602
    # check OS parameters (remotely)
8603
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
8604

    
8605
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
8606

    
8607
    # memory check on primary node
8608
    if self.op.start:
8609
      _CheckNodeFreeMemory(self, self.pnode.name,
8610
                           "creating instance %s" % self.op.instance_name,
8611
                           self.be_full[constants.BE_MEMORY],
8612
                           self.op.hypervisor)
8613

    
8614
    self.dry_run_result = list(nodenames)
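    # In dry-run mode this node list (primary plus any secondaries) is what
    # the opcode returns instead of actually creating the instance.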

    
8616
  def Exec(self, feedback_fn):
8617
    """Create and add the instance to the cluster.
8618

8619
    """
8620
    instance = self.op.instance_name
8621
    pnode_name = self.pnode.name
8622

    
8623
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
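      # Hypervisors in HTS_REQ_PORT need a TCP port from the cluster-wide
      # pool, typically for the graphical console (e.g. VNC); the remaining
      # hypervisors run without one.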
    else:
8627
      network_port = None
8628

    
8629
    disks = _GenerateDiskTemplate(self,
8630
                                  self.op.disk_template,
8631
                                  instance, pnode_name,
8632
                                  self.secondaries,
8633
                                  self.disks,
8634
                                  self.instance_file_storage_dir,
8635
                                  self.op.file_driver,
8636
                                  0,
8637
                                  feedback_fn)
8638

    
8639
    iobj = objects.Instance(name=instance, os=self.op.os_type,
8640
                            primary_node=pnode_name,
8641
                            nics=self.nics, disks=disks,
8642
                            disk_template=self.op.disk_template,
8643
                            admin_up=False,
8644
                            network_port=network_port,
8645
                            beparams=self.op.beparams,
8646
                            hvparams=self.op.hvparams,
8647
                            hypervisor=self.op.hypervisor,
8648
                            osparams=self.op.osparams,
8649
                            )
8650

    
8651
    if self.op.tags:
8652
      for tag in self.op.tags:
8653
        iobj.AddTag(tag)
8654

    
8655
    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
8669
      feedback_fn("* creating instance disks...")
8670
      try:
8671
        _CreateDisks(self, iobj)
8672
      except errors.OpExecError:
8673
        self.LogWarning("Device creation failed, reverting...")
8674
        try:
8675
          _RemoveDisks(self, iobj)
8676
        finally:
8677
          self.cfg.ReleaseDRBDMinors(instance)
8678
          raise
8679

    
8680
    feedback_fn("adding instance %s to cluster config" % instance)
8681

    
8682
    self.cfg.AddInstance(iobj, self.proc.GetECId())
8683

    
8684
    # Declare that we don't want to remove the instance lock anymore, as we've
8685
    # added the instance to the config
8686
    del self.remove_locks[locking.LEVEL_INSTANCE]
8687

    
8688
    if self.op.mode == constants.INSTANCE_IMPORT:
8689
      # Release unused nodes
8690
      _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
8691
    else:
8692
      # Release all nodes
8693
      _ReleaseLocks(self, locking.LEVEL_NODE)
8694

    
8695
    disk_abort = False
8696
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
8697
      feedback_fn("* wiping instance disks...")
8698
      try:
8699
        _WipeDisks(self, iobj)
8700
      except errors.OpExecError, err:
8701
        logging.exception("Wiping disks failed")
8702
        self.LogWarning("Wiping instance disks failed (%s)", err)
8703
        disk_abort = True
8704

    
8705
    if disk_abort:
8706
      # Something is already wrong with the disks, don't do anything else
8707
      pass
8708
    elif self.op.wait_for_sync:
8709
      disk_abort = not _WaitForSync(self, iobj)
8710
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
8711
      # make sure the disks are not degraded (still sync-ing is ok)
8712
      time.sleep(15)
8713
      feedback_fn("* checking mirrors status")
8714
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
8715
    else:
8716
      disk_abort = False
8717

    
8718
    if disk_abort:
8719
      _RemoveDisks(self, iobj)
8720
      self.cfg.RemoveInstance(iobj.name)
8721
      # Make sure the instance lock gets removed
8722
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
8723
      raise errors.OpExecError("There are some degraded disks for"
8724
                               " this instance")
8725

    
8726
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
8727
      if self.op.mode == constants.INSTANCE_CREATE:
8728
        if not self.op.no_install:
8729
          feedback_fn("* running the instance OS create scripts...")
8730
          # FIXME: pass debug option from opcode to backend
8731
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
8732
                                                 self.op.debug_level)
8733
          result.Raise("Could not add os for instance %s"
8734
                       " on node %s" % (instance, pnode_name))
8735

    
8736
      elif self.op.mode == constants.INSTANCE_IMPORT:
8737
        feedback_fn("* running the instance OS import scripts...")
8738

    
8739
        transfers = []
8740

    
8741
        for idx, image in enumerate(self.src_images):
8742
          if not image:
8743
            continue
8744

    
8745
          # FIXME: pass debug option from opcode to backend
8746
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
8747
                                             constants.IEIO_FILE, (image, ),
8748
                                             constants.IEIO_SCRIPT,
8749
                                             (iobj.disks[idx], idx),
8750
                                             None)
8751
          transfers.append(dt)
8752

    
8753
        import_result = \
8754
          masterd.instance.TransferInstanceData(self, feedback_fn,
8755
                                                self.op.src_node, pnode_name,
8756
                                                self.pnode.secondary_ip,
8757
                                                iobj, transfers)
8758
        if not compat.all(import_result):
8759
          self.LogWarning("Some disks for instance %s on node %s were not"
8760
                          " imported successfully" % (instance, pnode_name))
8761

    
8762
      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
8763
        feedback_fn("* preparing remote import...")
8764
        # The source cluster will stop the instance before attempting to make a
8765
        # connection. In some cases stopping an instance can take a long time,
8766
        # hence the shutdown timeout is added to the connection timeout.
8767
        connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
8768
                           self.op.source_shutdown_timeout)
8769
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
8770

    
8771
        assert iobj.primary_node == self.pnode.name
8772
        disk_results = \
8773
          masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
8774
                                        self.source_x509_ca,
8775
                                        self._cds, timeouts)
8776
        if not compat.all(disk_results):
8777
          # TODO: Should the instance still be started, even if some disks
8778
          # failed to import (valid for local imports, too)?
8779
          self.LogWarning("Some disks for instance %s on node %s were not"
8780
                          " imported successfully" % (instance, pnode_name))
8781

    
8782
        # Run rename script on newly imported instance
8783
        assert iobj.name == instance
8784
        feedback_fn("Running rename script for %s" % instance)
8785
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
8786
                                                   self.source_instance_name,
8787
                                                   self.op.debug_level)
8788
        if result.fail_msg:
8789
          self.LogWarning("Failed to run rename script for %s on node"
8790
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
8791

    
8792
      else:
8793
        # also checked in the prereq part
8794
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
8795
                                     % self.op.mode)
8796

    
8797
    if self.op.start:
8798
      iobj.admin_up = True
8799
      self.cfg.Update(iobj, feedback_fn)
8800
      logging.info("Starting instance %s on node %s", instance, pnode_name)
8801
      feedback_fn("* starting instance...")
8802
      result = self.rpc.call_instance_start(pnode_name, iobj,
8803
                                            None, None, False)
8804
      result.Raise("Could not start instance")
8805

    
8806
    return list(iobj.all_nodes)


class LUInstanceConsole(NoHooksLU):
8810
  """Connect to an instance's console.
8811

8812
  This is somewhat special in that it returns the command line that
8813
  you need to run on the master node in order to connect to the
8814
  console.
8815

8816
  """
8817
  REQ_BGL = False
8818

    
8819
  def ExpandNames(self):
8820
    self._ExpandAndLockInstance()
8821

    
8822
  def CheckPrereq(self):
8823
    """Check prerequisites.
8824

8825
    This checks that the instance is in the cluster.
8826

8827
    """
8828
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8829
    assert self.instance is not None, \
8830
      "Cannot retrieve locked instance %s" % self.op.instance_name
8831
    _CheckNodeOnline(self, self.instance.primary_node)
8832

    
8833
  def Exec(self, feedback_fn):
8834
    """Connect to the console of an instance
8835

8836
    """
8837
    instance = self.instance
8838
    node = instance.primary_node
8839

    
8840
    node_insts = self.rpc.call_instance_list([node],
8841
                                             [instance.hypervisor])[node]
8842
    node_insts.Raise("Can't get node information from %s" % node)
8843

    
8844
    if instance.name not in node_insts.payload:
8845
      if instance.admin_up:
8846
        state = constants.INSTST_ERRORDOWN
8847
      else:
8848
        state = constants.INSTST_ADMINDOWN
8849
      raise errors.OpExecError("Instance %s is not running (state %s)" %
8850
                               (instance.name, state))
8851

    
8852
    logging.debug("Connecting to console of %s on %s", instance.name, node)
8853

    
8854
    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
8855

    
8856

    
8857
def _GetInstanceConsole(cluster, instance):
  """Returns console information for an instance.

  @type cluster: L{objects.Cluster}
  @type instance: L{objects.Instance}
  @rtype: dict
  @return: console parameters, as a serialized dictionary

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  # beparams and hvparams are passed separately, to avoid editing the
  # instance and then saving the defaults in the instance itself.
  hvparams = cluster.FillHV(instance)
  beparams = cluster.FillBE(instance)
  console = hyper.GetInstanceConsole(instance, hvparams, beparams)

  assert console.instance == instance.name
  assert console.Validate()

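  # The dictionary typically carries the console "kind" plus kind-specific
  # details (e.g. an SSH command, or a VNC host and port), depending on the
  # hypervisor; see objects.InstanceConsole for the exact fields.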
  return console.ToDict()


class LUInstanceReplaceDisks(LogicalUnit):
8879
  """Replace the disks of an instance.
8880

8881
  """
8882
  HPATH = "mirrors-replace"
8883
  HTYPE = constants.HTYPE_INSTANCE
8884
  REQ_BGL = False
8885

    
8886
  def CheckArguments(self):
8887
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
8888
                                  self.op.iallocator)
8889

    
8890
  def ExpandNames(self):
8891
    self._ExpandAndLockInstance()
8892

    
8893
    assert locking.LEVEL_NODE not in self.needed_locks
8894
    assert locking.LEVEL_NODEGROUP not in self.needed_locks
8895

    
8896
    assert self.op.iallocator is None or self.op.remote_node is None, \
8897
      "Conflicting options"
8898

    
8899
    if self.op.remote_node is not None:
8900
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8901

    
8902
      # Warning: do not remove the locking of the new secondary here
8903
      # unless DRBD8.AddChildren is changed to work in parallel;
8904
      # currently it doesn't since parallel invocations of
8905
      # FindUnusedMinor will conflict
8906
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
8907
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
8908
    else:
8909
      self.needed_locks[locking.LEVEL_NODE] = []
8910
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8911

    
8912
      if self.op.iallocator is not None:
8913
        # iallocator will select a new node in the same group
8914
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
8915

    
8916
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
8917
                                   self.op.iallocator, self.op.remote_node,
8918
                                   self.op.disks, False, self.op.early_release)
8919

    
8920
    self.tasklets = [self.replacer]
8921

    
8922
  def DeclareLocks(self, level):
8923
    if level == locking.LEVEL_NODEGROUP:
8924
      assert self.op.remote_node is None
8925
      assert self.op.iallocator is not None
8926
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
8927

    
8928
      self.share_locks[locking.LEVEL_NODEGROUP] = 1
8929
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
8930
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
8931

    
8932
    elif level == locking.LEVEL_NODE:
8933
      if self.op.iallocator is not None:
8934
        assert self.op.remote_node is None
8935
        assert not self.needed_locks[locking.LEVEL_NODE]
8936

    
8937
        # Lock member nodes of all locked groups
8938
        self.needed_locks[locking.LEVEL_NODE] = [node_name
8939
          for group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
8940
          for node_name in self.cfg.GetNodeGroup(group_uuid).members]
8941
      else:
8942
        self._LockInstancesNodes()
8943

    
8944
  def BuildHooksEnv(self):
8945
    """Build hooks env.
8946

8947
    This runs on the master, the primary and all the secondaries.
8948

8949
    """
8950
    instance = self.replacer.instance
8951
    env = {
8952
      "MODE": self.op.mode,
8953
      "NEW_SECONDARY": self.op.remote_node,
8954
      "OLD_SECONDARY": instance.secondary_nodes[0],
8955
      }
8956
    env.update(_BuildInstanceHookEnvByObject(self, instance))
8957
    return env
8958

    
8959
  def BuildHooksNodes(self):
8960
    """Build hooks nodes.
8961

8962
    """
8963
    instance = self.replacer.instance
8964
    nl = [
8965
      self.cfg.GetMasterNode(),
8966
      instance.primary_node,
8967
      ]
8968
    if self.op.remote_node is not None:
8969
      nl.append(self.op.remote_node)
8970
    return nl, nl
8971

    
8972
  def CheckPrereq(self):
8973
    """Check prerequisites.
8974

8975
    """
8976
    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
8977
            self.op.iallocator is None)
8978

    
8979
    owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
8980
    if owned_groups:
8981
      groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
8982
      if owned_groups != groups:
8983
        raise errors.OpExecError("Node groups used by instance '%s' changed"
8984
                                 " since lock was acquired, current list is %r,"
8985
                                 " used to be '%s'" %
8986
                                 (self.op.instance_name,
8987
                                  utils.CommaJoin(groups),
8988
                                  utils.CommaJoin(owned_groups)))
8989

    
8990
    return LogicalUnit.CheckPrereq(self)


class TLReplaceDisks(Tasklet):
8994
  """Replaces disks for an instance.
8995

8996
  Note: Locking is not within the scope of this class.
8997

8998
  """
8999
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
9000
               disks, delay_iallocator, early_release):
9001
    """Initializes this class.
9002

9003
    """
9004
    Tasklet.__init__(self, lu)
9005

    
9006
    # Parameters
9007
    self.instance_name = instance_name
9008
    self.mode = mode
9009
    self.iallocator_name = iallocator_name
9010
    self.remote_node = remote_node
9011
    self.disks = disks
9012
    self.delay_iallocator = delay_iallocator
9013
    self.early_release = early_release
9014

    
9015
    # Runtime data
9016
    self.instance = None
9017
    self.new_node = None
9018
    self.target_node = None
9019
    self.other_node = None
9020
    self.remote_node_info = None
9021
    self.node_secondary_ip = None
9022

    
9023
  @staticmethod
9024
  def CheckArguments(mode, remote_node, iallocator):
9025
    """Helper function for users of this class.
9026

9027
    """
9028
    # check for valid parameter combination
9029
    if mode == constants.REPLACE_DISK_CHG:
9030
      if remote_node is None and iallocator is None:
9031
        raise errors.OpPrereqError("When changing the secondary either an"
9032
                                   " iallocator script must be used or the"
9033
                                   " new node given", errors.ECODE_INVAL)
9034

    
9035
      if remote_node is not None and iallocator is not None:
9036
        raise errors.OpPrereqError("Give either the iallocator or the new"
9037
                                   " secondary, not both", errors.ECODE_INVAL)
9038

    
9039
    elif remote_node is not None or iallocator is not None:
9040
      # Not replacing the secondary
9041
      raise errors.OpPrereqError("The iallocator and new node options can"
9042
                                 " only be used when changing the"
9043
                                 " secondary node", errors.ECODE_INVAL)
9044

    
9045
  @staticmethod
9046
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
9047
    """Compute a new secondary node using an IAllocator.
9048

9049
    """
9050
    ial = IAllocator(lu.cfg, lu.rpc,
9051
                     mode=constants.IALLOCATOR_MODE_RELOC,
9052
                     name=instance_name,
9053
                     relocate_from=relocate_from)
9054

    
9055
    ial.Run(iallocator_name)
9056

    
9057
    if not ial.success:
9058
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
9059
                                 " %s" % (iallocator_name, ial.info),
9060
                                 errors.ECODE_NORES)
9061

    
9062
    if len(ial.result) != ial.required_nodes:
9063
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
9064
                                 " of nodes (%s), required %s" %
9065
                                 (iallocator_name,
9066
                                  len(ial.result), ial.required_nodes),
9067
                                 errors.ECODE_FAULT)
9068

    
9069
    remote_node_name = ial.result[0]
9070

    
9071
    lu.LogInfo("Selected new secondary for instance '%s': %s",
9072
               instance_name, remote_node_name)
9073

    
9074
    return remote_node_name
9075

    
9076
  def _FindFaultyDisks(self, node_name):
9077
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
9078
                                    node_name, True)
9079

    
9080
  def _CheckDisksActivated(self, instance):
9081
    """Checks if the instance disks are activated.
9082

9083
    @param instance: The instance to check disks
9084
    @return: True if they are activated, False otherwise
9085

9086
    """
9087
    nodes = instance.all_nodes
9088

    
9089
    for idx, dev in enumerate(instance.disks):
9090
      for node in nodes:
9091
        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
9092
        self.cfg.SetDiskID(dev, node)
9093

    
9094
        result = self.rpc.call_blockdev_find(node, dev)
9095

    
9096
        if result.offline:
9097
          continue
9098
        elif result.fail_msg or not result.payload:
9099
          return False
9100

    
9101
    return True
9102

    
9103
  def CheckPrereq(self):
9104
    """Check prerequisites.
9105

9106
    This checks that the instance is in the cluster.
9107

9108
    """
9109
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
9110
    assert instance is not None, \
9111
      "Cannot retrieve locked instance %s" % self.instance_name
9112

    
9113
    if instance.disk_template != constants.DT_DRBD8:
9114
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
9115
                                 " instances", errors.ECODE_INVAL)
9116

    
9117
    if len(instance.secondary_nodes) != 1:
9118
      raise errors.OpPrereqError("The instance has a strange layout,"
9119
                                 " expected one secondary but found %d" %
9120
                                 len(instance.secondary_nodes),
9121
                                 errors.ECODE_FAULT)
9122

    
9123
    if not self.delay_iallocator:
9124
      self._CheckPrereq2()
9125

    
9126
  def _CheckPrereq2(self):
9127
    """Check prerequisites, second part.
9128

9129
    This function should always be part of CheckPrereq. It was separated and is
9130
    now called from Exec because during node evacuation iallocator was only
9131
    called with an unmodified cluster model, not taking planned changes into
9132
    account.
9133

9134
    """
9135
    instance = self.instance
9136
    secondary_node = instance.secondary_nodes[0]
9137

    
9138
    if self.iallocator_name is None:
9139
      remote_node = self.remote_node
9140
    else:
9141
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
9142
                                       instance.name, instance.secondary_nodes)
9143

    
9144
    if remote_node is None:
9145
      self.remote_node_info = None
9146
    else:
9147
      assert remote_node in self.lu.glm.list_owned(locking.LEVEL_NODE), \
9148
             "Remote node '%s' is not locked" % remote_node
9149

    
9150
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
9151
      assert self.remote_node_info is not None, \
9152
        "Cannot retrieve locked node %s" % remote_node
9153

    
9154
    if remote_node == self.instance.primary_node:
9155
      raise errors.OpPrereqError("The specified node is the primary node of"
9156
                                 " the instance", errors.ECODE_INVAL)
9157

    
9158
    if remote_node == secondary_node:
9159
      raise errors.OpPrereqError("The specified node is already the"
9160
                                 " secondary node of the instance",
9161
                                 errors.ECODE_INVAL)
9162

    
9163
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
9164
                                    constants.REPLACE_DISK_CHG):
9165
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
9166
                                 errors.ECODE_INVAL)
9167

    
9168
    if self.mode == constants.REPLACE_DISK_AUTO:
9169
      if not self._CheckDisksActivated(instance):
9170
        raise errors.OpPrereqError("Please run activate-disks on instance %s"
9171
                                   " first" % self.instance_name,
9172
                                   errors.ECODE_STATE)
9173
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
9174
      faulty_secondary = self._FindFaultyDisks(secondary_node)
9175

    
9176
      if faulty_primary and faulty_secondary:
9177
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
9178
                                   " one node and can not be repaired"
9179
                                   " automatically" % self.instance_name,
9180
                                   errors.ECODE_STATE)
9181

    
9182
      if faulty_primary:
9183
        self.disks = faulty_primary
9184
        self.target_node = instance.primary_node
9185
        self.other_node = secondary_node
9186
        check_nodes = [self.target_node, self.other_node]
9187
      elif faulty_secondary:
9188
        self.disks = faulty_secondary
9189
        self.target_node = secondary_node
9190
        self.other_node = instance.primary_node
9191
        check_nodes = [self.target_node, self.other_node]
9192
      else:
9193
        self.disks = []
9194
        check_nodes = []
9195

    
9196
    else:
9197
      # Non-automatic modes
9198
      if self.mode == constants.REPLACE_DISK_PRI:
9199
        self.target_node = instance.primary_node
9200
        self.other_node = secondary_node
9201
        check_nodes = [self.target_node, self.other_node]
9202

    
9203
      elif self.mode == constants.REPLACE_DISK_SEC:
9204
        self.target_node = secondary_node
9205
        self.other_node = instance.primary_node
9206
        check_nodes = [self.target_node, self.other_node]
9207

    
9208
      elif self.mode == constants.REPLACE_DISK_CHG:
9209
        self.new_node = remote_node
9210
        self.other_node = instance.primary_node
9211
        self.target_node = secondary_node
9212
        check_nodes = [self.new_node, self.other_node]
9213

    
9214
        _CheckNodeNotDrained(self.lu, remote_node)
9215
        _CheckNodeVmCapable(self.lu, remote_node)
9216

    
9217
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
9218
        assert old_node_info is not None
9219
        if old_node_info.offline and not self.early_release:
9220
          # doesn't make sense to delay the release
9221
          self.early_release = True
9222
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
9223
                          " early-release mode", secondary_node)
9224

    
9225
      else:
9226
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
9227
                                     self.mode)
9228

    
9229
      # If not specified all disks should be replaced
9230
      if not self.disks:
9231
        self.disks = range(len(self.instance.disks))
9232

    
9233
    for node in check_nodes:
9234
      _CheckNodeOnline(self.lu, node)
9235

    
9236
    touched_nodes = frozenset(node_name for node_name in [self.new_node,
9237
                                                          self.other_node,
9238
                                                          self.target_node]
9239
                              if node_name is not None)
9240

    
9241
    # Release unneeded node locks
9242
    _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
9243

    
9244
    # Release any owned node group
9245
    if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
9246
      _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
9247

    
9248
    # Check whether disks are valid
9249
    for disk_idx in self.disks:
9250
      instance.FindDisk(disk_idx)
9251

    
9252
    # Get secondary node IP addresses
9253
    self.node_secondary_ip = \
9254
      dict((node_name, self.cfg.GetNodeInfo(node_name).secondary_ip)
9255
           for node_name in touched_nodes)
9256

    
9257
  def Exec(self, feedback_fn):
9258
    """Execute disk replacement.
9259

9260
    This dispatches the disk replacement to the appropriate handler.
9261

9262
    """
9263
    if self.delay_iallocator:
9264
      self._CheckPrereq2()
9265

    
9266
    if __debug__:
9267
      # Verify owned locks before starting operation
9268
      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
9269
      assert set(owned_locks) == set(self.node_secondary_ip), \
9270
          ("Incorrect node locks, owning %s, expected %s" %
9271
           (owned_locks, self.node_secondary_ip.keys()))
9272

    
9273
      owned_locks = self.lu.glm.list_owned(locking.LEVEL_INSTANCE)
9274
      assert list(owned_locks) == [self.instance_name], \
9275
          "Instance '%s' not locked" % self.instance_name
9276

    
9277
      assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
9278
          "Should not own any node group lock at this point"
9279

    
9280
    if not self.disks:
9281
      feedback_fn("No disks need replacement")
9282
      return
9283

    
9284
    feedback_fn("Replacing disk(s) %s for %s" %
9285
                (utils.CommaJoin(self.disks), self.instance.name))
9286

    
9287
    activate_disks = (not self.instance.admin_up)
9288

    
9289
    # Activate the instance disks if we're replacing them on a down instance
9290
    if activate_disks:
9291
      _StartInstanceDisks(self.lu, self.instance, True)
9292

    
9293
    try:
9294
      # Should we replace the secondary node?
9295
      if self.new_node is not None:
9296
        fn = self._ExecDrbd8Secondary
9297
      else:
9298
        fn = self._ExecDrbd8DiskOnly
9299

    
9300
      result = fn(feedback_fn)
9301
    finally:
9302
      # Deactivate the instance disks if we're replacing them on a
9303
      # down instance
9304
      if activate_disks:
9305
        _SafeShutdownInstanceDisks(self.lu, self.instance)
9306

    
9307
    if __debug__:
9308
      # Verify owned locks
9309
      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
9310
      nodes = frozenset(self.node_secondary_ip)
9311
      assert ((self.early_release and not owned_locks) or
9312
              (not self.early_release and not (set(owned_locks) - nodes))), \
9313
        ("Not owning the correct locks, early_release=%s, owned=%r,"
9314
         " nodes=%r" % (self.early_release, owned_locks, nodes))
9315

    
9316
    return result
9317

    
9318
  def _CheckVolumeGroup(self, nodes):
9319
    self.lu.LogInfo("Checking volume groups")
9320

    
9321
    vgname = self.cfg.GetVGName()
9322

    
9323
    # Make sure volume group exists on all involved nodes
9324
    results = self.rpc.call_vg_list(nodes)
9325
    if not results:
9326
      raise errors.OpExecError("Can't list volume groups on the nodes")
9327

    
9328
    for node in nodes:
9329
      res = results[node]
9330
      res.Raise("Error checking node %s" % node)
9331
      if vgname not in res.payload:
9332
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
9333
                                 (vgname, node))
9334

    
9335
  def _CheckDisksExistence(self, nodes):
9336
    # Check disk existence
9337
    for idx, dev in enumerate(self.instance.disks):
9338
      if idx not in self.disks:
9339
        continue
9340

    
9341
      for node in nodes:
9342
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
9343
        self.cfg.SetDiskID(dev, node)
9344

    
9345
        result = self.rpc.call_blockdev_find(node, dev)
9346

    
9347
        msg = result.fail_msg
9348
        if msg or not result.payload:
9349
          if not msg:
9350
            msg = "disk not found"
9351
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
9352
                                   (idx, node, msg))
9353

    
9354
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
9355
    for idx, dev in enumerate(self.instance.disks):
9356
      if idx not in self.disks:
9357
        continue
9358

    
9359
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
9360
                      (idx, node_name))
9361

    
9362
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
9363
                                   ldisk=ldisk):
9364
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
9365
                                 " replace disks for instance %s" %
9366
                                 (node_name, self.instance.name))
9367

    
9368
  def _CreateNewStorage(self, node_name):
9369
    """Create new storage on the primary or secondary node.
9370

9371
    This is only used for same-node replaces, not for changing the
9372
    secondary node, hence we don't want to modify the existing disk.
9373

9374
    """
9375
    iv_names = {}
9376

    
9377
    for idx, dev in enumerate(self.instance.disks):
9378
      if idx not in self.disks:
9379
        continue
9380

    
9381
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
9382

    
9383
      self.cfg.SetDiskID(dev, node_name)
9384

    
9385
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
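      # e.g. [".disk0_data", ".disk0_meta"]; _GenerateUniqueNames prepends a
      # freshly generated unique ID to each suffix to form the final LV names.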
      names = _GenerateUniqueNames(self.lu, lv_names)
9387

    
9388
      vg_data = dev.children[0].logical_id[0]
9389
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
9390
                             logical_id=(vg_data, names[0]))
9391
      vg_meta = dev.children[1].logical_id[0]
9392
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
9393
                             logical_id=(vg_meta, names[1]))
9394

    
9395
      new_lvs = [lv_data, lv_meta]
9396
      old_lvs = [child.Copy() for child in dev.children]
9397
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
9398

    
9399
      # we pass force_create=True to force the LVM creation
9400
      for new_lv in new_lvs:
9401
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
9402
                        _GetInstanceInfoText(self.instance), False)
9403

    
9404
    return iv_names
9405

    
9406
  def _CheckDevices(self, node_name, iv_names):
9407
    for name, (dev, _, _) in iv_names.iteritems():
9408
      self.cfg.SetDiskID(dev, node_name)
9409

    
9410
      result = self.rpc.call_blockdev_find(node_name, dev)
9411

    
9412
      msg = result.fail_msg
9413
      if msg or not result.payload:
9414
        if not msg:
9415
          msg = "disk not found"
9416
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
9417
                                 (name, msg))
9418

    
9419
      if result.payload.is_degraded:
9420
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
9421

    
9422
  def _RemoveOldStorage(self, node_name, iv_names):
9423
    for name, (_, old_lvs, _) in iv_names.iteritems():
9424
      self.lu.LogInfo("Remove logical volumes for %s" % name)
9425

    
9426
      for lv in old_lvs:
9427
        self.cfg.SetDiskID(lv, node_name)
9428

    
9429
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
9430
        if msg:
9431
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
9432
                             hint="remove unused LVs manually")
9433

    
9434
  def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable-msg=W0613
    """Replace a disk on the primary or secondary for DRBD 8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
9457

    
9458
    # Step: check device activation
9459
    self.lu.LogStep(1, steps_total, "Check device existence")
9460
    self._CheckDisksExistence([self.other_node, self.target_node])
9461
    self._CheckVolumeGroup([self.target_node, self.other_node])
9462

    
9463
    # Step: check other node consistency
9464
    self.lu.LogStep(2, steps_total, "Check peer consistency")
9465
    self._CheckDisksConsistency(self.other_node,
9466
                                self.other_node == self.instance.primary_node,
9467
                                False)
9468

    
9469
    # Step: create new storage
9470
    self.lu.LogStep(3, steps_total, "Allocate new storage")
9471
    iv_names = self._CreateNewStorage(self.target_node)
9472

    
9473
    # Step: for each lv, detach+rename*2+attach
9474
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
9475
    for dev, old_lvs, new_lvs in iv_names.itervalues():
9476
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
9477

    
9478
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
9479
                                                     old_lvs)
9480
      result.Raise("Can't detach drbd from local storage on node"
9481
                   " %s for device %s" % (self.target_node, dev.iv_name))
9482
      #dev.children = []
9483
      #cfg.Update(instance)
9484

    
9485
      # ok, we created the new LVs, so now we know we have the needed
9486
      # storage; as such, we proceed on the target node to rename
9487
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
9488
      # using the assumption that logical_id == physical_id (which in
9489
      # turn is the unique_id on that node)
9490

    
9491
      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
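      # e.g. ("xenvg", "<uuid>.disk0_data") becomes
      # ("xenvg", "<uuid>.disk0_data_replaced-1365000000"); names and
      # timestamp shown for illustration only.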

    
9496
      # Build the rename list based on what LVs exist on the node
9497
      rename_old_to_new = []
9498
      for to_ren in old_lvs:
9499
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
9500
        if not result.fail_msg and result.payload:
9501
          # device exists
9502
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
9503

    
9504
      self.lu.LogInfo("Renaming the old LVs on the target node")
9505
      result = self.rpc.call_blockdev_rename(self.target_node,
9506
                                             rename_old_to_new)
9507
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
9508

    
9509
      # Now we rename the new LVs to the old LVs
9510
      self.lu.LogInfo("Renaming the new LVs on the target node")
9511
      rename_new_to_old = [(new, old.physical_id)
9512
                           for old, new in zip(old_lvs, new_lvs)]
9513
      result = self.rpc.call_blockdev_rename(self.target_node,
9514
                                             rename_new_to_old)
9515
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
9516

    
9517
      # Intermediate steps of in memory modifications
9518
      for old, new in zip(old_lvs, new_lvs):
9519
        new.logical_id = old.logical_id
9520
        self.cfg.SetDiskID(new, self.target_node)
9521

    
9522
      # We need to modify old_lvs so that removal later removes the
9523
      # right LVs, not the newly added ones; note that old_lvs is a
9524
      # copy here
9525
      for disk in old_lvs:
9526
        disk.logical_id = ren_fn(disk, temp_suffix)
9527
        self.cfg.SetDiskID(disk, self.target_node)
9528

    
9529
      # Now that the new lvs have the old name, we can add them to the device
9530
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
9531
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
9532
                                                  new_lvs)
9533
      msg = result.fail_msg
9534
      if msg:
9535
        for new_lv in new_lvs:
9536
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
9537
                                               new_lv).fail_msg
9538
          if msg2:
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                               hint=("manually clean up the unused"
                                     " logical volumes"))
9542
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
9543

    
9544
    cstep = 5
9545
    if self.early_release:
9546
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
9547
      cstep += 1
9548
      self._RemoveOldStorage(self.target_node, iv_names)
9549
      # WARNING: we release both node locks here, do not do other RPCs
9550
      # than WaitForSync to the primary node
9551
      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
9552
                    names=[self.target_node, self.other_node])
9553

    
9554
    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)

  def _ExecDrbd8Secondary(self, feedback_fn):
    """Replace the secondary node for DRBD 8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.instance.primary_node])
    self._CheckVolumeGroup([self.instance.primary_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.instance.primary_node, True, True)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                      (self.new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    # Step 4: drbd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
                                        self.instance.name)
    logging.debug("Allocated minors %r", minors)

    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
                      (self.new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the latter activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        assert self.instance.primary_node == o_node2, "Three-node instance?"
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)

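      # (both IDs follow the DRBD8 logical_id layout
      #  (node_a, node_b, port, minor_a, minor_b, secret); new_alone_id
      #  carries no port, so the device first comes up without networking)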
      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size)
      try:
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
                              _GetInstanceInfoText(self.instance), False)
      except errors.GenericError:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise

    # We have new devices, shutdown the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
      self.cfg.SetDiskID(dev, self.target_node)
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please cleanup this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
                                               self.node_secondary_ip,
                                               self.instance.disks)\
                                              [self.instance.primary_node]

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      self.cfg.SetDiskID(dev, self.instance.primary_node)

    self.cfg.Update(self.instance, feedback_fn)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
                                           self.instance.disks,
                                           self.instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           to_node, msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))
    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release all node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
                    names=[self.instance.primary_node,
                           self.target_node,
                           self.new_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node, iv_names)


class LURepairNodeStorage(NoHooksLU):
  """Repairs the volume group on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    if (constants.SO_FIX_CONSISTENCY not in
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " repaired" % storage_type,
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def _CheckFaultyDisks(self, instance, node_name):
    """Ensure faulty disks abort the opcode or at least warn."""
    try:
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
                                  node_name, True):
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
                                   " node '%s'" % (instance.name, node_name),
                                   errors.ECODE_STATE)
    except errors.OpPrereqError, err:
      if self.op.ignore_consistency:
        self.proc.LogWarning(str(err.args[0]))
      else:
        raise

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # Check whether any instance on this node has faulty disks
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
      if not inst.admin_up:
        continue
      check_nodes = set(inst.all_nodes)
      check_nodes.discard(self.op.node_name)
      for inst_node_name in check_nodes:
        self._CheckFaultyDisks(inst, inst_node_name)

  def Exec(self, feedback_fn):
    feedback_fn("Repairing storage unit '%s' on %s ..." %
                (self.op.name, self.op.node_name))

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_execute(self.op.node_name,
                                           self.op.storage_type, st_args,
                                           self.op.name,
                                           constants.SO_FIX_CONSISTENCY)
    result.Raise("Failed to repair storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUNodeEvacuate(NoHooksLU):
  """Evacuates instances off a list of nodes.

  """
  REQ_BGL = False

  def CheckArguments(self):
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    if self.op.remote_node is not None:
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
      assert self.op.remote_node

      if self.op.remote_node == self.op.node_name:
        raise errors.OpPrereqError("Can not use evacuated node as a new"
                                   " secondary node", errors.ECODE_INVAL)

      if self.op.mode != constants.IALLOCATOR_NEVAC_SEC:
        raise errors.OpPrereqError("Without the use of an iallocator only"
                                   " secondary instances can be evacuated",
                                   errors.ECODE_INVAL)

    # Declare locks
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

    if self.op.remote_node is None:
      # Iallocator will choose any node(s) in the same group
      group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
    else:
      group_nodes = frozenset([self.op.remote_node])

    # Determine nodes to be locked
    self.lock_nodes = set([self.op.node_name]) | group_nodes

  def _DetermineInstances(self):
    """Builds list of instances to operate on.

    """
    assert self.op.mode in constants.IALLOCATOR_NEVAC_MODES

    if self.op.mode == constants.IALLOCATOR_NEVAC_PRI:
      # Primary instances only
      inst_fn = _GetNodePrimaryInstances
      assert self.op.remote_node is None, \
        "Evacuating primary instances requires iallocator"
    elif self.op.mode == constants.IALLOCATOR_NEVAC_SEC:
      # Secondary instances only
      inst_fn = _GetNodeSecondaryInstances
    else:
      # All instances
      assert self.op.mode == constants.IALLOCATOR_NEVAC_ALL
      inst_fn = _GetNodeInstances

    return inst_fn(self.cfg, self.op.node_name)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        set(i.name for i in self._DetermineInstances())

    elif level == locking.LEVEL_NODEGROUP:
      # Lock node groups optimistically, needs verification once nodes have
      # been acquired
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)

    elif level == locking.LEVEL_NODE:
      self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes

  def CheckPrereq(self):
    # Verify locks
    owned_instances = self.glm.list_owned(locking.LEVEL_INSTANCE)
    owned_nodes = self.glm.list_owned(locking.LEVEL_NODE)
    owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)

    assert owned_nodes == self.lock_nodes

    wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
    if owned_groups != wanted_groups:
      raise errors.OpExecError("Node groups changed since locks were acquired,"
                               " current groups are '%s', used to be '%s'" %
                               (utils.CommaJoin(wanted_groups),
                                utils.CommaJoin(owned_groups)))

    # Determine affected instances
    self.instances = self._DetermineInstances()
    self.instance_names = [i.name for i in self.instances]

    if set(self.instance_names) != owned_instances:
      raise errors.OpExecError("Instances on node '%s' changed since locks"
                               " were acquired, current instances are '%s',"
                               " used to be '%s'" %
                               (self.op.node_name,
                                utils.CommaJoin(self.instance_names),
                                utils.CommaJoin(owned_instances)))

    if self.instance_names:
      self.LogInfo("Evacuating instances from node '%s': %s",
                   self.op.node_name,
                   utils.CommaJoin(utils.NiceSort(self.instance_names)))
    else:
      self.LogInfo("No instances to evacuate from node '%s'",
                   self.op.node_name)

    if self.op.remote_node is not None:
      for i in self.instances:
        if i.primary_node == self.op.remote_node:
          raise errors.OpPrereqError("Node %s is the primary node of"
                                     " instance %s, cannot use it as"
                                     " secondary" %
                                     (self.op.remote_node, i.name),
                                     errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)

    if not self.instance_names:
      # No instances to evacuate
      jobs = []

    elif self.op.iallocator is not None:
      # TODO: Implement relocation to other group
      ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_NODE_EVAC,
                       evac_mode=self.op.mode,
                       instances=list(self.instance_names))

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute node evacuation using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, True)

    elif self.op.remote_node is not None:
      assert self.op.mode == constants.IALLOCATOR_NEVAC_SEC
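      # (build one single-opcode job per instance, so each disk replacement
      #  is submitted and tracked as a separate job)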
      jobs = [
        [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
                                        remote_node=self.op.remote_node,
                                        disks=[],
                                        mode=constants.REPLACE_DISK_CHG,
                                        early_release=self.op.early_release)]
        for instance_name in self.instance_names
        ]

    else:
      raise errors.ProgrammerError("No iallocator or remote node")

    return ResultWithJobs(jobs)


def _SetOpEarlyRelease(early_release, op):
  """Sets C{early_release} flag on opcodes if available.

  """
  try:
    op.early_release = early_release
  except AttributeError:
    assert not isinstance(op, opcodes.OpInstanceReplaceDisks)

  return op


def _NodeEvacDest(use_nodes, group, nodes):
  """Returns group or nodes depending on caller's choice.

  """
  if use_nodes:
    return utils.CommaJoin(nodes)
  else:
    return group


def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
  """Unpacks the result of change-group and node-evacuate iallocator requests.

  Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
  L{constants.IALLOCATOR_MODE_CHG_GROUP}.

  @type lu: L{LogicalUnit}
  @param lu: Logical unit instance
  @type alloc_result: tuple/list
  @param alloc_result: Result from iallocator
  @type early_release: bool
  @param early_release: Whether to release locks early if possible
  @type use_nodes: bool
  @param use_nodes: Whether to display node names instead of groups

  """
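  # (alloc_result is (moved, failed, jobs): moved is a list of
  #  (name, group, nodes) tuples, failed a list of (name, reason) pairs and
  #  jobs a list of job definitions, each one a list of serialized opcodes)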
  (moved, failed, jobs) = alloc_result

  if failed:
    lu.LogWarning("Unable to evacuate instances %s",
                  utils.CommaJoin("%s (%s)" % (name, reason)
                                  for (name, reason) in failed))

  if moved:
    lu.LogInfo("Instances to be moved: %s",
               utils.CommaJoin("%s (to %s)" %
                               (name, _NodeEvacDest(use_nodes, group, nodes))
                               for (name, group, nodes) in moved))

  return [map(compat.partial(_SetOpEarlyRelease, early_release),
              map(opcodes.OpCode.LoadOpCode, ops))
          for ops in jobs]


class LUInstanceGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing", errors.ECODE_INVAL)

    self.disk = instance.FindDisk(self.op.disk)

    if instance.disk_template not in (constants.DT_FILE,
                                      constants.DT_SHARED_FILE):
      # TODO: check the free disk space for file, when that feature will be
      # supported
      _CheckNodesFreeDiskPerVG(self, nodenames,
                               self.disk.ComputeGrowth(self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk

    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")

    # First run all grow ops in dry-run mode
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, True)
      result.Raise("Grow request failed to node %s" % node)

    # We know that (as far as we can test) operations across different
    # nodes will succeed, time to run it for real
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, False)
      result.Raise("Grow request failed to node %s" % node)

      # TODO: Rewrite code to work properly
      # DRBD goes into sync mode for a short amount of time after executing the
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
      # calling "resize" in sync mode fails. Sleeping for a short amount of
      # time is a work-around.
      time.sleep(5)

    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance, feedback_fn)
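    # (the grown size is recorded in the configuration before the optional
    #  wait below, so it is persisted even if the resync is still running)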
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
      if disk_abort:
        self.proc.LogWarning("Disk sync-ing has not returned a good"
                             " status; please check the instance")
      if not instance.admin_up:
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
    elif not instance.admin_up:
      self.proc.LogWarning("Not shutting down the disk even if the instance is"
                           " not supposed to be running because no wait for"
                           " sync mode was requested")


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = dict.fromkeys(locking.LEVELS, 1)

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODE] = []
      self.share_locks = dict.fromkeys(locking.LEVELS, 1)
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking and level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)

    self.wanted_instances = [self.cfg.GetInstanceInfo(name)
                             for name in self.wanted_names]

  def _ComputeBlockdevStatus(self, node, instance_name, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance_name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

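    # (for DRBD, logical_id[0] and logical_id[1] are the two peer nodes, so
    #  snode above ends up being whichever of them is not the primary)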
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance.name, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatus,
                                        instance, snode),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      pnode = self.cfg.GetNodeInfo(instance.primary_node)

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"

      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
                  instance.disks)

      result[instance.name] = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": _NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      _CheckGlobalHvParams(self.op.hvparams)

    # Disk validation
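    # (self.op.disks is a list of (op, params) pairs; an add request could
    #  look like [(constants.DDM_ADD, {constants.IDISK_SIZE: 1024})] --
    #  illustrative values only)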
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
        if not isinstance(disk_dict, dict):
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      if disk_op == constants.DDM_ADD:
        mode = disk_dict.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                     errors.ECODE_INVAL)
        size = disk_dict.get(constants.IDISK_SIZE, None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing",
                                     errors.ECODE_INVAL)
        try:
          size = int(size)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err), errors.ECODE_INVAL)
        disk_dict[constants.IDISK_SIZE] = size
      else:
        # modification of disk
        if constants.IDISK_SIZE in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk", errors.ECODE_INVAL)

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time", errors.ECODE_INVAL)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
        if not isinstance(nic_dict, dict):
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      # nic_dict should be a dict
      nic_ip = nic_dict.get(constants.INIC_IP, None)
      if nic_ip is not None:
        if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict[constants.INIC_IP] = None
        else:
          if not netutils.IPAddress.IsValid(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
                                       errors.ECODE_INVAL)

      nic_bridge = nic_dict.get("bridge", None)
      nic_link = nic_dict.get(constants.INIC_LINK, None)
      if nic_bridge and nic_link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time", errors.ECODE_INVAL)
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
        nic_dict["bridge"] = None
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
        nic_dict[constants.INIC_LINK] = None

      if nic_op == constants.DDM_ADD:
        nic_mac = nic_dict.get(constants.INIC_MAC, None)
        if nic_mac is None:
          nic_dict[constants.INIC_MAC] = constants.VALUE_AUTO

      if constants.INIC_MAC in nic_dict:
        nic_mac = nic_dict[constants.INIC_MAC]
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)

        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing nic",
                                     errors.ECODE_INVAL)

    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time", errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    if constants.BE_MEMORY in self.be_new:
      args["memory"] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      args["nics"] = []
      nic_override = dict(self.op.nics)
      for idx, nic in enumerate(self.instance.nics):
        if idx in nic_override:
          this_nic_override = nic_override[idx]
        else:
          this_nic_override = {}
        if constants.INIC_IP in this_nic_override:
          ip = this_nic_override[constants.INIC_IP]
        else:
          ip = nic.ip
        if constants.INIC_MAC in this_nic_override:
          mac = this_nic_override[constants.INIC_MAC]
        else:
          mac = nic.mac
        if idx in self.nic_pnew:
          nicparams = self.nic_pnew[idx]
        else:
          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args["nics"].append((ip, mac, mode, link))
      if constants.DDM_ADD in nic_override:
        ip = nic_override[constants.DDM_ADD].get(constants.INIC_IP, None)
        mac = nic_override[constants.DDM_ADD][constants.INIC_MAC]
        nicparams = self.nic_pnew[constants.DDM_ADD]
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args["nics"].append((ip, mac, mode, link))
      elif constants.DDM_REMOVE in nic_override:
        del args["nics"][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # OS change
    if self.op.os_name and not self.op.force:
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
                      self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = instance.os

    if self.op.disk_template:
      if instance.disk_template == self.op.disk_template:
        raise errors.OpPrereqError("Instance already has disk template %s" %
                                   instance.disk_template, errors.ECODE_INVAL)

      if (instance.disk_template,
          self.op.disk_template) not in self._DISK_CONVERSIONS:
        raise errors.OpPrereqError("Unsupported disk template conversion from"
                                   " %s to %s" % (instance.disk_template,
                                                  self.op.disk_template),
                                   errors.ECODE_INVAL)
      _CheckInstanceDown(self, instance, "cannot change disk template")
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.remote_node == pnode:
          raise errors.OpPrereqError("Given new secondary node %s is the same"
                                     " as the primary node of the instance" %
                                     self.op.remote_node, errors.ECODE_STATE)
        _CheckNodeOnline(self, self.op.remote_node)
        _CheckNodeNotDrained(self, self.op.remote_node)
        # FIXME: here we assume that the old instance type is DT_PLAIN
        assert instance.disk_template == constants.DT_PLAIN
        disks = [{constants.IDISK_SIZE: d.size,
                  constants.IDISK_VG: d.logical_id[0]}
                 for d in instance.disks]
        required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
        _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)

    # hvparams processing
    if self.op.hvparams:
      hv_type = instance.hypervisor
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
                                   use_none=True)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = cluster.SimpleFillBE(i_bedict)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
    be_old = cluster.FillBE(instance)

    # osparams processing
    if self.op.osparams:
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    self.warn = []

    if (constants.BE_MEMORY in self.op.beparams and not self.op.force and
        be_new[constants.BE_MEMORY] > be_old[constants.BE_MEMORY]):
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         instance.hypervisor)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode,  msg))
      elif not isinstance(pninfo.payload.get("memory_free", None), int):
        self.warn.append("Node data from primary node %s doesn't contain"
                         " free memory information" % pnode)
      elif instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                        instance_info.fail_msg)
      else:
        if instance_info.payload:
          current_mem = int(instance_info.payload["memory"])
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    pninfo.payload["memory_free"])
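        # (miss_mem is the extra memory the primary node would still need,
        #  after crediting what the instance currently uses on it)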
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem,
                                     errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" % node,
                     prereq=True, ecode=errors.ECODE_STATE)
          if not isinstance(nres.payload.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" % node,
                                       errors.ECODE_STATE)
          elif be_new[constants.BE_MEMORY] > nres.payload["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" % node,
                                       errors.ECODE_STATE)

    # NIC processing
    self.nic_pnew = {}
    self.nic_pinst = {}
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
                                     errors.ECODE_INVAL)
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if not instance.nics:
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
                                     " no NICs" % nic_op,
                                     errors.ECODE_INVAL)
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics) - 1),
                                     errors.ECODE_INVAL)
        old_nic_params = instance.nics[nic_op].nicparams
        old_nic_ip = instance.nics[nic_op].ip
      else:
        old_nic_params = {}
        old_nic_ip = None

      update_params_dict = dict([(key, nic_dict[key])
                                 for key in constants.NICS_PARAMETERS
                                 if key in nic_dict])

      if "bridge" in nic_dict:
        update_params_dict[constants.NIC_LINK] = nic_dict["bridge"]

      new_nic_params = _GetUpdatedParams(old_nic_params,
                                         update_params_dict)
      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
      self.nic_pinst[nic_op] = new_nic_params
      self.nic_pnew[nic_op] = new_filled_nic_params
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]

      if new_nic_mode == constants.NIC_MODE_BRIDGED:
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
        if msg:
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
          if self.op.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
      if new_nic_mode == constants.NIC_MODE_ROUTED:
        if constants.INIC_IP in nic_dict:
          nic_ip = nic_dict[constants.INIC_IP]
        else:
          nic_ip = old_nic_ip
        if nic_ip is None:
          raise errors.OpPrereqError("Cannot set the nic ip to None"
                                     " on a routed nic", errors.ECODE_INVAL)
      if constants.INIC_MAC in nic_dict:
        nic_mac = nic_dict[constants.INIC_MAC]
        if nic_mac is None:
          raise errors.OpPrereqError("Cannot set the nic mac to None",
                                     errors.ECODE_INVAL)
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          # otherwise generate the mac
          nic_dict[constants.INIC_MAC] = \
            self.cfg.GenerateMAC(self.proc.GetECId())
        else:
          # or validate/reserve the current one
          try:
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
          except errors.ReservationError:
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % nic_mac,
                                       errors.ECODE_NOTUNIQUE)

    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances",
                                 errors.ECODE_INVAL)
    for disk_op, _ in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance", errors.ECODE_INVAL)
        _CheckInstanceDown(self, instance, "cannot remove disks")

      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS,
                                   errors.ECODE_STATE)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks)),
                                     errors.ECODE_INVAL)

    return

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    instance = self.instance
    pnode = instance.primary_node
    snode = self.op.remote_node

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0]}
                 for d in instance.disks]
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
                                      instance.name, pnode, [snode],
                                      disk_info, None, None, 0, feedback_fn)
    info = _GetInstanceInfoText(instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in new_disks:
      # unfortunately this is... not too nice
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
                            info, True)
      for child in disk.children:
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
    result.Raise("Failed to rename original LVs")

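    # (the renamed plain LVs now carry the names generated for the new data
    #  children, so they are reused as the DRBD data devices on the primary)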
    feedback_fn("Initializing DRBD devices...")
10765
    # all child devices are in place, we can now create the DRBD devices
10766
    for disk in new_disks:
10767
      for node in [pnode, snode]:
10768
        f_create = node == pnode
10769
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
10770

    
10771
    # at this point, the instance has been modified
10772
    instance.disk_template = constants.DT_DRBD8
10773
    instance.disks = new_disks
10774
    self.cfg.Update(instance, feedback_fn)
10775

    
10776
    # disks are created, waiting for sync
10777
    disk_abort = not _WaitForSync(self, instance,
10778
                                  oneshot=not self.op.wait_for_sync)
10779
    if disk_abort:
10780
      raise errors.OpExecError("There are some degraded disks for"
10781
                               " this instance, please cleanup manually")
10782

    
10783
  def _ConvertDrbdToPlain(self, feedback_fn):
10784
    """Converts an instance from drbd to plain.
10785

10786
    """
10787
    instance = self.instance
10788
    assert len(instance.secondary_nodes) == 1
10789
    pnode = instance.primary_node
10790
    snode = instance.secondary_nodes[0]
10791
    feedback_fn("Converting template to plain")
10792

    
10793
    old_disks = instance.disks
10794
    new_disks = [d.children[0] for d in old_disks]
10795

    
10796
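    # (each DRBD disk's children[0] is the data LV and children[1] the DRBD
    #  metadata LV; only the data LV is kept when converting back to plain)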
    # copy over size and mode
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode

    # update instance structure
    instance.disks = new_disks
    instance.disk_template = constants.DT_PLAIN
    self.cfg.Update(instance, feedback_fn)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode)
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name, snode, msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode)
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx, pnode, msg)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
          if msg:
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template in (constants.DT_FILE,
                                        constants.DT_SHARED_FILE):
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base, feedback_fn)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
10872
        #HARDCODE
10873
        for node in instance.all_nodes:
10874
          f_create = node == instance.primary_node
10875
          try:
10876
            _CreateBlockDev(self, node, instance, new_disk,
10877
                            f_create, info, f_create)
10878
          except errors.OpExecError, err:
10879
            self.LogWarning("Failed to create volume %s (%s) on"
10880
                            " node %s: %s",
10881
                            new_disk.iv_name, new_disk, node, err)
10882
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
10883
                       (new_disk.size, new_disk.mode)))
10884
      else:
10885
        # change a given disk
10886
        instance.disks[disk_op].mode = disk_dict[constants.IDISK_MODE]
10887
        result.append(("disk.mode/%d" % disk_op,
10888
                       disk_dict[constants.IDISK_MODE]))
10889

    
10890
    if self.op.disk_template:
10891
      r_shut = _ShutdownInstanceDisks(self, instance)
10892
      if not r_shut:
10893
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
10894
                                 " proceed with disk template conversion")
10895
      mode = (instance.disk_template, self.op.disk_template)
10896
      try:
10897
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
10898
      except:
10899
        self.cfg.ReleaseDRBDMinors(instance.name)
10900
        raise
10901
      result.append(("disk_template", self.op.disk_template))
10902

    
10903
    # NIC changes
10904
    for nic_op, nic_dict in self.op.nics:
10905
      if nic_op == constants.DDM_REMOVE:
10906
        # remove the last nic
10907
        del instance.nics[-1]
10908
        result.append(("nic.%d" % len(instance.nics), "remove"))
10909
      elif nic_op == constants.DDM_ADD:
10910
        # mac and bridge should be set, by now
10911
        mac = nic_dict[constants.INIC_MAC]
10912
        ip = nic_dict.get(constants.INIC_IP, None)
10913
        nicparams = self.nic_pinst[constants.DDM_ADD]
10914
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
10915
        instance.nics.append(new_nic)
10916
        result.append(("nic.%d" % (len(instance.nics) - 1),
10917
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
10918
                       (new_nic.mac, new_nic.ip,
10919
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
10920
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
10921
                       )))
10922
      else:
10923
        for key in (constants.INIC_MAC, constants.INIC_IP):
10924
          if key in nic_dict:
10925
            setattr(instance.nics[nic_op], key, nic_dict[key])
10926
        if nic_op in self.nic_pinst:
10927
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
10928
        for key, val in nic_dict.iteritems():
10929
          result.append(("nic.%s/%d" % (key, nic_op), val))
10930

    
10931
    # hvparams changes
10932
    if self.op.hvparams:
10933
      instance.hvparams = self.hv_inst
10934
      for key, val in self.op.hvparams.iteritems():
10935
        result.append(("hv/%s" % key, val))
10936

    
10937
    # beparams changes
10938
    if self.op.beparams:
10939
      instance.beparams = self.be_inst
10940
      for key, val in self.op.beparams.iteritems():
10941
        result.append(("be/%s" % key, val))
10942

    
10943
    # OS change
10944
    if self.op.os_name:
10945
      instance.os = self.op.os_name
10946

    
10947
    # osparams changes
10948
    if self.op.osparams:
10949
      instance.osparams = self.os_inst
10950
      for key, val in self.op.osparams.iteritems():
10951
        result.append(("os/%s" % key, val))
10952

    
10953
    self.cfg.Update(instance, feedback_fn)
10954

    
10955
    return result
10956

    
10957
  _DISK_CONVERSIONS = {
10958
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
10959
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
10960
    }
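  # The two handlers above are dispatched from Exec() via
  # self._DISK_CONVERSIONS[(old_template, new_template)](self, feedback_fn);
  # only the plain <-> drbd8 conversions listed here are handled.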


class LUBackupQuery(NoHooksLU):
  """Query the exports list

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
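    # Illustrative result (the node and instance names are hypothetical):
    #   {"node1.example.com": ["instance1.example.com"],
    #    "node2.example.com": False}
    # A value of False means the export list could not be fetched from that
    # node.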
    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node in rpcresult:
      if rpcresult[node].fail_msg:
        result[node] = False
      else:
        result[node] = rpcresult[node].payload

    return result


class LUBackupPrepare(NoHooksLU):
  """Prepares an instance for an export and returns useful information.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    instance_name = self.op.instance_name

    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self._cds = _GetClusterDomainSecret()

  def Exec(self, feedback_fn):
    """Prepares an instance for an export.

    """
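    # For remote exports this returns the handshake and the signed X509 data
    # the destination cluster needs; for other modes there is nothing to
    # prepare and None is returned.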
    instance = self.instance

    if self.op.mode == constants.EXPORT_MODE_REMOTE:
      salt = utils.GenerateSecret(8)

      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
      result = self.rpc.call_x509_cert_create(instance.primary_node,
                                              constants.RIE_CERT_VALIDITY)
      result.Raise("Can't create X509 key and certificate on %s" % result.node)

      (name, cert_pem) = result.payload

      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                             cert_pem)

      return {
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
                          salt),
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
        }

    return None


class LUBackupExport(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.x509_key_name = self.op.x509_key_name
    self.dest_x509_ca_pem = self.op.destination_x509_ca

    if self.op.mode == constants.EXPORT_MODE_REMOTE:
      if not self.x509_key_name:
        raise errors.OpPrereqError("Missing X509 key name for encryption",
                                   errors.ECODE_INVAL)

      if not self.dest_x509_ca_pem:
        raise errors.OpPrereqError("Missing destination X509 CA",
                                   errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    # Lock all nodes for local exports
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      # FIXME: lock only instance primary and destination node
      #
      # Sad but true, for now we have to lock all nodes, as we don't know where
      # the previous export might be, and in this LU we search for it and
      # remove it from its current node. In the future we could fix this by:
      #  - making a tasklet to search (share-lock all), then create the
      #    new one, then one to remove, after
      #  - removing the removal operation altogether
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_MODE": self.op.mode,
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      # TODO: Generic function for boolean env variables
      "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
      }

    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]

    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      nl.append(self.op.target_node)

    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name

    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    if (self.op.remove_instance and self.instance.admin_up and
        not self.op.shutdown):
      raise errors.OpPrereqError("Cannot remove instance without shutting it"
                                 " down first")

    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
      assert self.dst_node is not None

      _CheckNodeOnline(self, self.dst_node.name)
      _CheckNodeNotDrained(self, self.dst_node.name)

      self._cds = None
      self.dest_disk_info = None
      self.dest_x509_ca = None

    elif self.op.mode == constants.EXPORT_MODE_REMOTE:
      self.dst_node = None

      if len(self.op.target_node) != len(self.instance.disks):
        raise errors.OpPrereqError(("Received destination information for %s"
                                    " disks, but instance %s has %s disks") %
                                   (len(self.op.target_node), instance_name,
                                    len(self.instance.disks)),
                                   errors.ECODE_INVAL)

      cds = _GetClusterDomainSecret()

      # Check X509 key name
      try:
        (key_name, hmac_digest, hmac_salt) = self.x509_key_name
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)

      if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
        raise errors.OpPrereqError("HMAC for X509 key name is wrong",
                                   errors.ECODE_INVAL)

      # Load and verify CA
      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
                                   (msg, ), errors.ECODE_INVAL)

      self.dest_x509_ca = cert

      # Verify target information
      disk_info = []
      for idx, disk_data in enumerate(self.op.target_node):
        try:
          (host, port, magic) = \
            masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
        except errors.GenericError, err:
          raise errors.OpPrereqError("Target info for disk %s: %s" %
                                     (idx, err), errors.ECODE_INVAL)

        disk_info.append((host, port, magic))

      assert len(disk_info) == len(self.op.target_node)
      self.dest_disk_info = disk_info

    else:
      raise errors.ProgrammerError("Unhandled export mode %r" %
                                   self.op.mode)

    # instance disk type verification
    # TODO: Implement export support for file-based disks
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks", errors.ECODE_INVAL)

  def _CleanupExports(self, feedback_fn):
    """Removes exports of current instance from all other nodes.

    If an instance in a cluster with nodes A..D was exported to node C, its
    exports will be removed from the nodes A, B and D.

    """
    assert self.op.mode != constants.EXPORT_MODE_REMOTE

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(self.dst_node.name)

    # on one-node clusters nodelist will be empty after the removal;
    # if we proceed, the backup would be removed because OpBackupQuery
    # substitutes an empty list with the full cluster node list.
    iname = self.instance.name
    if nodelist:
      feedback_fn("Removing old exports for instance %s" % iname)
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].fail_msg:
          continue
        if iname in exportlist[node].payload:
          msg = self.rpc.call_export_remove(node, iname).fail_msg
          if msg:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s: %s", iname, node, msg)

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
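    # Outline of the steps below: optionally shut the instance down, make
    # sure its disks are activated, snapshot and copy them either to a local
    # target node or to a remote cluster, restart the instance if needed,
    # and finally remove the instance and/or stale exports when requested.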
    assert self.op.mode in constants.EXPORT_MODES

    instance = self.instance
    src_node = instance.primary_node

    if self.op.shutdown:
      # shutdown the instance, but not the disks
      feedback_fn("Shutting down instance %s" % instance.name)
      result = self.rpc.call_instance_shutdown(src_node, instance,
                                               self.op.shutdown_timeout)
      # TODO: Maybe ignore failures if ignore_remove_failures is set
      result.Raise("Could not shutdown instance %s on"
                   " node %s" % (instance.name, src_node))

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    activate_disks = (not instance.admin_up)

    if activate_disks:
      # Activate the instance disks if we're exporting a stopped instance
      feedback_fn("Activating disks for %s" % instance.name)
      _StartInstanceDisks(self, instance, None)

    try:
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
                                                     instance)

      helper.CreateSnapshots()
      try:
        if (self.op.shutdown and instance.admin_up and
            not self.op.remove_instance):
          assert not activate_disks
          feedback_fn("Starting instance %s" % instance.name)
          result = self.rpc.call_instance_start(src_node, instance,
                                                None, None, False)
          msg = result.fail_msg
          if msg:
            feedback_fn("Failed to start instance: %s" % msg)
            _ShutdownInstanceDisks(self, instance)
            raise errors.OpExecError("Could not start instance: %s" % msg)

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
          (fin_resu, dresults) = helper.LocalExport(self.dst_node)
        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
          connect_timeout = constants.RIE_CONNECT_TIMEOUT
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          (key_name, _, _) = self.x509_key_name

          dest_ca_pem = \
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                            self.dest_x509_ca)

          (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
                                                     key_name, dest_ca_pem,
                                                     timeouts)
      finally:
        helper.Cleanup()

      # Check for backwards compatibility
      assert len(dresults) == len(instance.disks)
      assert compat.all(isinstance(i, bool) for i in dresults), \
             "Not all results are boolean: %r" % dresults

    finally:
      if activate_disks:
        feedback_fn("Deactivating disks for %s" % instance.name)
        _ShutdownInstanceDisks(self, instance)

    if not (compat.all(dresults) and fin_resu):
      failures = []
      if not fin_resu:
        failures.append("export finalization")
      if not compat.all(dresults):
        fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
                               if not dsk)
        failures.append("disk export: disk(s) %s" % fdsk)

      raise errors.OpExecError("Export failed, errors in %s" %
                               utils.CommaJoin(failures))

    # At this point, the export was successful, we can cleanup/finish

    # Remove instance if requested
    if self.op.remove_instance:
      feedback_fn("Removing instance %s" % instance.name)
      _RemoveInstance(self, feedback_fn, instance,
                      self.op.ignore_remove_failures)

    if self.op.mode == constants.EXPORT_MODE_LOCAL:
      self._CleanupExports(feedback_fn)

    return fin_resu, dresults


class LUBackupRemove(NoHooksLU):
  """Remove exports related to the named instance.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Remove any export.

    """
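    # The named instance may already have been removed from the cluster; in
    # that case the export can only be matched by the exact (fully
    # qualified) name given in the opcode, which is what the fqdn_warn
    # handling below is about.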
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = False
    if not instance_name:
      fqdn_warn = True
      instance_name = self.op.instance_name

    locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      msg = exportlist[node].fail_msg
      if msg:
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
        continue
      if instance_name in exportlist[node].payload:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        msg = result.fail_msg
        if msg:
          logging.error("Could not remove export for instance %s"
                        " on node %s: %s", instance_name, node, msg)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")


class LUGroupAdd(LogicalUnit):
  """Logical unit for creating node groups.

  """
  HPATH = "group-add"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # We need the new group's UUID here so that we can create and acquire the
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
    # that it should not check whether the UUID exists in the configuration.
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
    self.needed_locks = {}
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name is not an existing node group
    already.

    """
    try:
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                                 " node group (UUID: %s)" %
                                 (self.op.group_name, existing_uuid),
                                 errors.ECODE_EXISTS)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Add the node group to the cluster.

    """
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
                                  uuid=self.group_uuid,
                                  alloc_policy=self.op.alloc_policy,
                                  ndparams=self.op.ndparams)

    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
    del self.remove_locks[locking.LEVEL_NODEGROUP]


class LUGroupAssignNodes(NoHooksLU):
  """Logical unit for assigning nodes to groups.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # These raise errors.OpPrereqError on their own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)

    # We want to lock all the affected nodes and groups. We have readily
    # available the list of nodes, and the *destination* group. To gather the
    # list of "source" groups, we need to fetch node information later on.
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
      locking.LEVEL_NODE: self.op.nodes,
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1

      # Try to get all affected nodes' groups without having the group or node
      # lock yet. Needs verification later in the code flow.
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)

      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
    assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
            frozenset(self.op.nodes))

    expected_locks = (set([self.group_uuid]) |
                      self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
    actual_locks = self.glm.list_owned(locking.LEVEL_NODEGROUP)
    if actual_locks != expected_locks:
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
                               " current groups are '%s', used to be '%s'" %
                               (utils.CommaJoin(expected_locks),
                                utils.CommaJoin(actual_locks)))

    self.node_data = self.cfg.GetAllNodesInfo()
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    instance_data = self.cfg.GetAllInstancesInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    (new_splits, previous_splits) = \
      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
                                             for node in self.op.nodes],
                                            self.node_data, instance_data)

    if new_splits:
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))

      if not self.op.force:
        raise errors.OpExecError("The following instances get split by this"
                                 " change and --force was not given: %s" %
                                 fmt_new_splits)
      else:
        self.LogWarning("This operation will split the following instances: %s",
                        fmt_new_splits)

        if previous_splits:
          self.LogWarning("In addition, these already-split instances continue"
                          " to be split across groups: %s",
                          utils.CommaJoin(utils.NiceSort(previous_splits)))

  def Exec(self, feedback_fn):
    """Assign nodes to a new group.

    """
    for node in self.op.nodes:
      self.node_data[node].group = self.group_uuid

    # FIXME: Depends on side-effects of modifying the result of
    # C{cfg.GetAllNodesInfo}

    self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.

  @staticmethod
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
    """Check for split instances after a node assignment.

    This method considers a series of node assignments as an atomic operation,
    and returns information about split instances after applying the set of
    changes.

    In particular, it returns information about newly split instances, and
    instances that were already split, and remain so after the change.

    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
    considered.

    @type changes: list of (node_name, new_group_uuid) pairs.
    @param changes: list of node assignments to consider.
    @param node_data: a dict with data for all nodes
    @param instance_data: a dict with all instances to consider
    @rtype: a two-tuple
    @return: a list of instances that were previously okay and end up split as a
      consequence of this change, and a list of instances that were previously
      split and this change does not fix.

    """
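    # Hypothetical example: instance "inst1" uses DRBD on nodes "node1" and
    # "node2", both currently in group "g1".  Re-assigning only "node1" to
    # "g2" makes inst1 newly split (first returned list); an instance that
    # was already split and whose nodes still end up in different groups is
    # reported in the second list instead.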
    changed_nodes = dict((node, group) for node, group in changes
                         if node_data[node].group != group)

    all_split_instances = set()
    previously_split_instances = set()

    def InstanceNodes(instance):
      return [instance.primary_node] + list(instance.secondary_nodes)

    for inst in instance_data.values():
      if inst.disk_template not in constants.DTS_INT_MIRROR:
        continue

      instance_nodes = InstanceNodes(inst)

      if len(set(node_data[node].group for node in instance_nodes)) > 1:
        previously_split_instances.add(inst.name)

      if len(set(changed_nodes.get(node, node_data[node].group)
                 for node in instance_nodes)) > 1:
        all_split_instances.add(inst.name)

    return (list(all_split_instances - previously_split_instances),
            list(previously_split_instances & all_split_instances))


class _GroupQuery(_QueryBase):
  FIELDS = query.GROUP_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}

    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())

    if not self.names:
      self.wanted = [name_to_uuid[name]
                     for name in utils.NiceSort(name_to_uuid.keys())]
    else:
      # Accept names to be either names or UUIDs.
      missing = []
      self.wanted = []
      all_uuid = frozenset(self._all_groups.keys())

      for name in self.names:
        if name in all_uuid:
          self.wanted.append(name)
        elif name in name_to_uuid:
          self.wanted.append(name_to_uuid[name])
        else:
          missing.append(name)

      if missing:
        raise errors.OpPrereqError("Some groups do not exist: %s" %
                                   utils.CommaJoin(missing),
                                   errors.ECODE_NOENT)

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of node groups and their attributes.

    """
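    # When requested, group_to_nodes / group_to_instances are built below as
    # dicts mapping group UUID -> list of member node names / names of
    # instances whose primary node is in the group; they stay None when the
    # corresponding data was not asked for.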
    do_nodes = query.GQ_NODE in self.requested_data
    do_instances = query.GQ_INST in self.requested_data

    group_to_nodes = None
    group_to_instances = None

    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
    # latter GetAllInstancesInfo() is not enough, for we have to go through
    # instance->node. Hence, we will need to process nodes even if we only need
    # instance information.
    if do_nodes or do_instances:
      all_nodes = lu.cfg.GetAllNodesInfo()
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
      node_to_group = {}

      for node in all_nodes.values():
        if node.group in group_to_nodes:
          group_to_nodes[node.group].append(node.name)
          node_to_group[node.name] = node.group

      if do_instances:
        all_instances = lu.cfg.GetAllInstancesInfo()
        group_to_instances = dict((uuid, []) for uuid in self.wanted)

        for instance in all_instances.values():
          node = instance.primary_node
          if node in node_to_group:
            group_to_instances[node_to_group[node]].append(instance.name)

        if not do_nodes:
          # Do not pass on node information if it was not requested.
          group_to_nodes = None

    return query.GroupQueryData([self._all_groups[uuid]
                                 for uuid in self.wanted],
                                group_to_nodes, group_to_instances)


class LUGroupQuery(NoHooksLU):
  """Logical unit for querying node groups.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
                          self.op.output_fields, False)

  def ExpandNames(self):
    self.gq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.gq.OldStyleQuery(self)


class LUGroupSetParams(LogicalUnit):
  """Modifies the parameters of a node group.

  """
  HPATH = "group-modify"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def CheckArguments(self):
    all_changes = [
      self.op.ndparams,
      self.op.alloc_policy,
      ]

    if all_changes.count(None) == len(all_changes):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.group = self.cfg.GetNodeGroup(self.group_uuid)

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    if self.op.ndparams:
      new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Modifies the node group.

    """
    result = []

    if self.op.ndparams:
      self.group.ndparams = self.new_ndparams
      result.append(("ndparams", str(self.group.ndparams)))

    if self.op.alloc_policy:
      self.group.alloc_policy = self.op.alloc_policy

    self.cfg.Update(self.group, feedback_fn)
    return result



class LUGroupRemove(LogicalUnit):
  HPATH = "group-remove"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This will raise errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.

    """
    # Verify that the group is empty.
    group_nodes = [node.name
                   for node in self.cfg.GetAllNodesInfo().values()
                   if node.group == self.group_uuid]

    if group_nodes:
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
                                 " nodes: %s" %
                                 (self.op.group_name,
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
                                 errors.ECODE_STATE)

    # Verify the cluster would not be left group-less.
    if len(self.cfg.GetNodeGroupList()) == 1:
      raise errors.OpPrereqError("Group '%s' is the only group,"
                                 " cannot be removed" %
                                 self.op.group_name,
                                 errors.ECODE_STATE)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Remove the node group.

    """
    try:
      self.cfg.RemoveNodeGroup(self.group_uuid)
    except errors.ConfigurationError:
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
                               (self.op.group_name, self.group_uuid))

    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid


class LUGroupRename(LogicalUnit):
  HPATH = "group-rename"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    Ensures requested new name is not yet used.

    """
    try:
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
                                 " node group (UUID: %s)" %
                                 (self.op.new_name, new_name_uuid),
                                 errors.ECODE_EXISTS)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OLD_NAME": self.op.group_name,
      "NEW_NAME": self.op.new_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    all_nodes = self.cfg.GetAllNodesInfo()
    all_nodes.pop(mn, None)

    run_nodes = [mn]
    run_nodes.extend(node.name for node in all_nodes.values()
                     if node.group == self.group_uuid)

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    """Rename the node group.

    """
    group = self.cfg.GetNodeGroup(self.group_uuid)

    if group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    group.name = self.op.new_name
    self.cfg.Update(group, feedback_fn)

    return self.op.new_name

class LUGroupEvacuate(LogicalUnit):
  HPATH = "group-evacuate"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = []

    if self.group_uuid in self.req_target_uuids:
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
                                 " as a target group (targets are %s)" %
                                 (self.group_uuid,
                                  utils.CommaJoin(self.req_target_uuids)),
                                 errors.ECODE_INVAL)

    if not self.op.iallocator:
      # Use default iallocator
      self.op.iallocator = self.cfg.GetDefaultIAllocator()

    if not self.op.iallocator:
      raise errors.OpPrereqError("No iallocator was specified, neither in the"
                                 " opcode nor as a cluster-wide default",
                                 errors.ECODE_INVAL)

    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetNodeGroupInstances(self.group_uuid)

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set([self.group_uuid] + self.req_target_uuids)

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lock_groups.update(group_uuid
                           for instance_name in
                             self.glm.list_owned(locking.LEVEL_INSTANCE)
                           for group_uuid in
                             self.cfg.GetInstanceNodeGroups(instance_name))
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be evacuated which
      # contain actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be evacuated
      assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
      member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)

  def CheckPrereq(self):
    owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))

    assert owned_groups.issuperset(self.req_target_uuids)
    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    wanted_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
    if owned_instances != wanted_instances:
      raise errors.OpPrereqError("Instances in node group to be evacuated (%s)"
                                 " changed since locks were acquired, wanted"
                                 " %s, have %s; retry the operation" %
                                 (self.group_uuid,
                                  utils.CommaJoin(wanted_instances),
                                  utils.CommaJoin(owned_instances)),
                                 errors.ECODE_STATE)

    # Get instance information
    self.instances = dict((name, self.cfg.GetInstanceInfo(name))
                          for name in owned_instances)

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instances:
      inst = self.instances[instance_name]
      assert self.group_uuid in self.cfg.GetInstanceNodeGroups(instance_name), \
        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
      assert owned_nodes.issuperset(inst.all_nodes), \
        "Instance %s's nodes changed while we kept the lock" % instance_name

      inst_groups = self.cfg.GetInstanceNodeGroups(instance_name)
      if not owned_groups.issuperset(inst_groups):
        raise errors.OpPrereqError("Instance's node groups changed since locks"
                                   " were acquired, current groups are '%s',"
                                   " owning groups '%s'; retry the operation" %
                                   (utils.CommaJoin(inst_groups),
                                    utils.CommaJoin(owned_groups)),
                                   errors.ECODE_STATE)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = self.req_target_uuids
    else:
      # All groups except the one to be evacuated are potential targets
      self.target_uuids = [group_uuid for group_uuid in owned_groups
                           if group_uuid != self.group_uuid]

      if not self.target_uuids:
        raise errors.OpExecError("There are no possible target groups")

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)

    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    instances = list(self.glm.list_owned(locking.LEVEL_INSTANCE))

    assert self.group_uuid not in self.target_uuids

    ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
                     instances=instances, target_groups=self.target_uuids)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute group evacuation using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
                 len(jobs), self.op.group_name)

    return ResultWithJobs(jobs)

class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def ExpandNames(self):
    self.group_uuid = None
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
    elif self.op.kind == constants.TAG_INSTANCE:
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
    elif self.op.kind == constants.TAG_NODEGROUP:
      self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)

    # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
    # not possible to acquire the BGL based on opcode parameters)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    elif self.op.kind == constants.TAG_NODEGROUP:
      self.target = self.cfg.GetNodeGroup(self.group_uuid)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind), errors.ECODE_INVAL)


class LUTagsGet(TagsLU):
  """Returns the tags of a given object.

  """
  REQ_BGL = False

  def ExpandNames(self):
    TagsLU.ExpandNames(self)

    # Share locks as this is only a read operation
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUTagsSearch(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err), errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
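    # The result is a list of (path, tag) tuples for every matching tag,
    # e.g. [("/instances/instance1.example.com", "web")] (the names here are
    # purely illustrative).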
    cfg = self.cfg
12154
    tgts = [("/cluster", cfg.GetClusterInfo())]
12155
    ilist = cfg.GetAllInstancesInfo().values()
12156
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
12157
    nlist = cfg.GetAllNodesInfo().values()
12158
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
12159
    tgts.extend(("/nodegroup/%s" % n.name, n)
12160
                for n in cfg.GetAllNodeGroupsInfo().values())
12161
    results = []
12162
    for path, target in tgts:
12163
      for tag in target.GetTags():
12164
        if self.re.search(tag):
12165
          results.append((path, tag))
12166
    return results
12167

    
12168

    
12169
class LUTagsSet(TagsLU):
  """Sets a tag on a given object.

  """
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tag.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    self.cfg.Update(self.target, feedback_fn)


class LUTagsDel(TagsLU):
  """Delete a list of tags from a given object.

  """
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()

    diff_tags = del_tags - cur_tags
    if diff_tags:
      diff_names = ("'%s'" % i for i in sorted(diff_tags))
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (utils.CommaJoin(diff_names), ),
                                 errors.ECODE_NOENT)

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    self.cfg.Update(self.target, feedback_fn)


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def _TestDelay(self):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      for node, node_result in result.items():
        node_result.Raise("Failure during rpc call to node %s" % node)

  def Exec(self, feedback_fn):
    """Execute the test delay opcode, with the wanted repetitions.

    """
    if self.op.repeat == 0:
      self._TestDelay()
    else:
      top_value = self.op.repeat - 1
      for i in range(self.op.repeat):
        self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
        self._TestDelay()


class LUTestJqueue(NoHooksLU):
  """Utility LU to test some aspects of the job queue.

  """
  REQ_BGL = False

  # Must be lower than default timeout for WaitForJobChange to see whether it
  # notices changed jobs
  _CLIENT_CONNECT_TIMEOUT = 20.0
  _CLIENT_CONFIRM_TIMEOUT = 60.0

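  # The helpers below implement a small handshake with an external test
  # client: a temporary Unix socket is created, its path is published to the
  # client through a job-queue log entry (_SendNotification), and the client
  # is expected first to connect within _CLIENT_CONNECT_TIMEOUT and then to
  # send a single confirmation byte within _CLIENT_CONFIRM_TIMEOUT.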
  @classmethod
  def _NotifyUsingSocket(cls, cb, errcls):
    """Opens a Unix socket and waits for another program to connect.

    @type cb: callable
    @param cb: Callback to send socket name to client
    @type errcls: class
    @param errcls: Exception class to use for errors

    """
    # Using a temporary directory as there's no easy way to create temporary
    # sockets without writing a custom loop around tempfile.mktemp and
    # socket.bind
    tmpdir = tempfile.mkdtemp()
    try:
      tmpsock = utils.PathJoin(tmpdir, "sock")

      logging.debug("Creating temporary socket at %s", tmpsock)
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
      try:
        sock.bind(tmpsock)
        sock.listen(1)

        # Send details to client
        cb(tmpsock)

        # Wait for client to connect before continuing
        sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
        try:
          (conn, _) = sock.accept()
        except socket.error, err:
          raise errcls("Client didn't connect in time (%s)" % err)
      finally:
        sock.close()
    finally:
      # Remove as soon as client is connected
      shutil.rmtree(tmpdir)

    # Wait for client to close
    try:
      try:
        # pylint: disable-msg=E1101
        # Instance of '_socketobject' has no ... member
        conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
        conn.recv(1)
      except socket.error, err:
        raise errcls("Client failed to confirm notification (%s)" % err)
    finally:
      conn.close()

  def _SendNotification(self, test, arg, sockname):
    """Sends a notification to the client.

    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)
    @type sockname: string
    @param sockname: Socket path

    """
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))

  def _Notify(self, prereq, test, arg):
    """Notifies the client of a test.

    @type prereq: bool
    @param prereq: Whether this is a prereq-phase test
    @type test: string
    @param test: Test name
    @param arg: Test argument (depends on test)

    """
    if prereq:
      errcls = errors.OpPrereqError
    else:
      errcls = errors.OpExecError

    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
                                                  test, arg),
                                   errcls)

  def CheckArguments(self):
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
    self.expandnames_calls = 0

  def ExpandNames(self):
    checkargs_calls = getattr(self, "checkargs_calls", 0)
    if checkargs_calls < 1:
      raise errors.ProgrammerError("CheckArguments was not called")

    self.expandnames_calls += 1

    if self.op.notify_waitlock:
      self._Notify(True, constants.JQT_EXPANDNAMES, None)

    self.LogInfo("Expanding names")

    # Get lock on master node (just to get a lock, not for a particular reason)
    self.needed_locks = {
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
      }

  def Exec(self, feedback_fn):
    if self.expandnames_calls < 1:
      raise errors.ProgrammerError("ExpandNames was not called")

    if self.op.notify_exec:
      self._Notify(False, constants.JQT_EXEC, None)

    self.LogInfo("Executing")

    if self.op.log_messages:
      self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
      for idx, msg in enumerate(self.op.log_messages):
        self.LogInfo("Sending log message %s", idx + 1)
        feedback_fn(constants.JQT_MSGPREFIX + msg)
        # Report how many test messages have been sent
        self._Notify(False, constants.JQT_LOGMSG, idx + 1)

    if self.op.fail:
      raise errors.OpExecError("Opcode failure was requested")

    return True


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all the key/value pairs declared for the chosen mode in the
      _MODE_DATA class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, result) for
      easy usage

  """
  # pylint: disable-msg=R0902
  # lots of instance attributes

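  # Rough usage pattern (a sketch, not a literal call site): the instance and
  # node LUs elsewhere in this module do approximately
  #   ial = IAllocator(cfg, rpc, mode=constants.IALLOCATOR_MODE_RELOC,
  #                    name=instance_name, relocate_from=[old_secondary])
  #   ial.Run(allocator_name)
  #   if not ial.success: <raise an OpPrereqError/OpExecError>
  # __init__ builds the JSON input (cluster data plus the mode-specific
  # request), Run() hands it to the external script via RPC and
  # _ValidateResult() checks the returned structure.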
  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.memory = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.evac_nodes = None
    self.instances = None
    self.evac_mode = None
    self.target_groups = []
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None

    try:
      (fn, keydata, self._result_check) = self._MODE_DATA[self.mode]
    except KeyError:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)

    keyset = [n for (n, _) in keydata]

    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData(compat.partial(fn, self), keydata)

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    ninfo = cfg.GetAllNodesInfo()
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_list = [n.name for n in ninfo.values() if n.vm_capable]

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    else:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)

    data["nodegroups"] = self._ComputeNodeGroupData(cfg)

    config_ndata = self._ComputeBasicNodeData(ninfo)
    data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
                                                 i_list, config_ndata)
    assert len(data["nodes"]) == len(ninfo), \
        "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)

    self.in_data = data

  @staticmethod
  def _ComputeNodeGroupData(cfg):
    """Compute node groups data.

    """
    ng = dict((guuid, {
      "name": gdata.name,
      "alloc_policy": gdata.alloc_policy,
      })
      for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())

    return ng

  @staticmethod
  def _ComputeBasicNodeData(node_cfg):
    """Compute global node data.

    @rtype: dict
    @returns: a dict of name: (node dict, node config)

    """
    # fill in static (config-based) values
    node_results = dict((ninfo.name, {
      "tags": list(ninfo.GetTags()),
      "primary_ip": ninfo.primary_ip,
      "secondary_ip": ninfo.secondary_ip,
      "offline": ninfo.offline,
      "drained": ninfo.drained,
      "master_candidate": ninfo.master_candidate,
      "group": ninfo.group,
      "master_capable": ninfo.master_capable,
      "vm_capable": ninfo.vm_capable,
      })
      for ninfo in node_cfg.values())

    return node_results

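  # Note on the memory accounting in _ComputeDynamicNodeData: the free memory
  # reported by a node is reduced by the positive difference between each
  # primary instance's configured memory (BE_MEMORY) and the memory it
  # currently uses, so the allocator sees the amount that would remain free
  # if every instance used its full configured size.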
  @staticmethod
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
                              node_results):
    """Compute global node data.

    @param node_results: the basic node structures as filled from the config

    """
    # make a copy of the current dict
    node_results = dict(node_results)
    for nname, nresult in node_data.items():
      assert nname in node_results, "Missing basic data for node %s" % nname
      ninfo = node_cfg[nname]

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ["memory_total", "memory_free", "memory_dom0",
                     "vg_size", "vg_free", "cpu_total"]:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info["memory_free"] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info["memory_total"],
          "reserved_memory": remote_info["memory_dom0"],
          "free_memory": remote_info["memory_free"],
          "total_disk": remote_info["vg_size"],
          "free_disk": remote_info["vg_free"],
          "total_cpus": remote_info["cpu_total"],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[nname])
        node_results[nname] = pnr_dyn

    return node_results

  @staticmethod
  def _ComputeInstanceData(cluster_info, i_list):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {
          "mac": nic.mac,
          "ip": nic.ip,
          "mode": filled_params[constants.NIC_MODE],
          "link": filled_params[constants.NIC_LINK],
          }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{constants.IDISK_SIZE: dsk.size,
                   constants.IDISK_MODE: dsk.mode}
                  for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data

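  # The _Add* methods below build only the mode-specific part of the request;
  # the shared cluster description comes from _ComputeClusterData() and the
  # two are combined into self.in_data by _BuildInputData().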
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_INT_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1

    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.memory,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      "hypervisor": self.hypervisor,
      }

    return request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if instance.disk_template in constants.DTS_INT_MIRROR and \
        len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node", errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddEvacuateNodes(self):
    """Add evacuate nodes data to allocator structure.

    """
    request = {
      "evac_nodes": self.evac_nodes
      }
    return request

  def _AddNodeEvacuate(self):
    """Get data for node-evacuate requests.

    """
    return {
      "instances": self.instances,
      "evac_mode": self.evac_mode,
      }

  def _AddChangeGroup(self):
    """Get data for group-change requests.

    """
    return {
      "instances": self.instances,
      "target_groups": self.target_groups,
      }

  def _BuildInputData(self, fn, keydata):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    for keyname, keytype in keydata:
      if keyname not in request:
        raise errors.ProgrammerError("Request parameter %s is missing" %
                                     keyname)
      val = request[keyname]
      if not keytype(val):
        raise errors.ProgrammerError("Request parameter %s doesn't pass"
                                     " validation, value %s, expected"
                                     " type %s" % (keyname, val, keytype))
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

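  # Per-mode plumbing: a few ht-based type descriptions, plus _MODE_DATA,
  # which maps each IALLOCATOR_MODE_* constant to a tuple of (request-building
  # method, list of (key, validator) pairs for the input keyword arguments,
  # validator for the allocator's result).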
  _STRING_LIST = ht.TListOf(ht.TString)
  _JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
     # pylint: disable-msg=E1101
     # Class '...' has no 'OP_ID' member
     "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
                          opcodes.OpInstanceMigrate.OP_ID,
                          opcodes.OpInstanceReplaceDisks.OP_ID])
     })))

  _NEVAC_MOVED = \
    ht.TListOf(ht.TAnd(ht.TIsLength(3),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TNonEmptyString,
                                  ht.TListOf(ht.TNonEmptyString),
                                 ])))
  _NEVAC_FAILED = \
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TMaybeString,
                                 ])))
  _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                          ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))

  _MODE_DATA = {
    constants.IALLOCATOR_MODE_ALLOC:
      (_AddNewInstance,
       [
        ("name", ht.TString),
        ("memory", ht.TInt),
        ("disks", ht.TListOf(ht.TDict)),
        ("disk_template", ht.TString),
        ("os", ht.TString),
        ("tags", _STRING_LIST),
        ("nics", ht.TListOf(ht.TDict)),
        ("vcpus", ht.TInt),
        ("hypervisor", ht.TString),
        ], ht.TList),
    constants.IALLOCATOR_MODE_RELOC:
      (_AddRelocateInstance,
       [("name", ht.TString), ("relocate_from", _STRING_LIST)],
       ht.TList),
    constants.IALLOCATOR_MODE_MEVAC:
      (_AddEvacuateNodes, [("evac_nodes", _STRING_LIST)],
       ht.TListOf(ht.TAnd(ht.TIsLength(2), _STRING_LIST))),
    constants.IALLOCATOR_MODE_NODE_EVAC:
      (_AddNodeEvacuate, [
        ("instances", _STRING_LIST),
        ("evac_mode", ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)),
        ], _NEVAC_RESULT),
    constants.IALLOCATOR_MODE_CHG_GROUP:
      (_AddChangeGroup, [
        ("instances", _STRING_LIST),
        ("target_groups", _STRING_LIST),
        ], _NEVAC_RESULT),
    }

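  # Run() ships self.in_text to the iallocator runner on the master node via
  # RPC; the raw output is stored in self.out_text and, unless validate=False,
  # _ValidateResult() parses it into self.out_data and the result attributes.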
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not self._result_check(self.result):
      raise errors.OpExecError("Iallocator returned invalid result,"
                               " expected %s, got %s" %
                               (self._result_check, self.result),
                               errors.ECODE_INVAL)

    if self.mode in (constants.IALLOCATOR_MODE_RELOC,
                     constants.IALLOCATOR_MODE_MEVAC):
      node2group = dict((name, ndata["group"])
                        for (name, ndata) in self.in_data["nodes"].items())

      fn = compat.partial(self._NodesToGroups, node2group,
                          self.in_data["nodegroups"])

      if self.mode == constants.IALLOCATOR_MODE_RELOC:
        assert self.relocate_from is not None
        assert self.required_nodes == 1

        request_groups = fn(self.relocate_from)
        result_groups = fn(rdict["result"])

        if result_groups != request_groups:
          raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
                                   " differ from original groups (%s)" %
                                   (utils.CommaJoin(result_groups),
                                    utils.CommaJoin(request_groups)))
      elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
        request_groups = fn(self.evac_nodes)
        for (instance_name, secnode) in self.result:
          result_groups = fn([secnode])
          if result_groups != request_groups:
            raise errors.OpExecError("Iallocator returned new secondary node"
                                     " '%s' (group '%s') for instance '%s'"
                                     " which is not in original group '%s'" %
                                     (secnode, utils.CommaJoin(result_groups),
                                      instance_name,
                                      utils.CommaJoin(request_groups)))
      else:
        raise errors.ProgrammerError("Unhandled mode '%s'" % self.mode)

    elif self.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
      assert self.evac_mode in constants.IALLOCATOR_NEVAC_MODES

    self.out_data = rdict

  @staticmethod
  def _NodesToGroups(node2group, groups, nodes):
    """Returns a list of unique group names for a list of nodes.

    @type node2group: dict
    @param node2group: Map from node name to group UUID
    @type groups: dict
    @param groups: Group information
    @type nodes: list
    @param nodes: Node names

    """
    result = set()

    for node in nodes:
      try:
        group_uuid = node2group[node]
      except KeyError:
        # Ignore unknown node
        pass
      else:
        try:
          group = groups[group_uuid]
        except KeyError:
          # Can't find group, let's use UUID
          group_name = group_uuid
        else:
          group_name = group["name"]

        result.add(group_name)

    return sorted(result)


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode of
    the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["memory", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            constants.IDISK_SIZE not in row or
            not isinstance(row[constants.IDISK_SIZE], int) or
            constants.IDISK_MODE not in row or
            row[constants.IDISK_MODE] not in constants.DISK_ACCESS_SET):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
                          constants.IALLOCATOR_MODE_NODE_EVAC):
      if not self.op.instances:
        raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
      self.op.instances = _GetWantedInstances(self, self.op.instances)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       memory=self.op.memory,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       instances=self.op.instances,
                       target_groups=self.op.target_groups)
    elif self.op.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       instances=self.op.instances,
                       evac_mode=self.op.evac_mode)
    else:
      raise errors.ProgrammerError("Uncaught mode %s in"
                                   " LUTestAllocator.Exec", self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result


#: Query type implementations
_QUERY_IMPL = {
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  constants.QR_OS: _OsQuery,
  }

assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP


def _GetQueryImplementation(name):
  """Returns the implementation for a query type.

  @param name: Query type, must be one of L{constants.QR_VIA_OP}

  """
  try:
    return _QUERY_IMPL[name]
  except KeyError:
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                               errors.ECODE_INVAL)