Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 2f10179b

History | View | Annotate | Download (435.5 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module implementing the master-side code."""
23

    
24
# pylint: disable-msg=W0201,C0302
25

    
26
# W0201 since most LU attributes are defined in CheckPrereq or similar
27
# functions
28

    
29
# C0302: since we have waaaay to many lines in this module
30

    
31
import os
32
import os.path
33
import time
34
import re
35
import platform
36
import logging
37
import copy
38
import OpenSSL
39
import socket
40
import tempfile
41
import shutil
42
import itertools
43

    
44
from ganeti import ssh
45
from ganeti import utils
46
from ganeti import errors
47
from ganeti import hypervisor
48
from ganeti import locking
49
from ganeti import constants
50
from ganeti import objects
51
from ganeti import serializer
52
from ganeti import ssconf
53
from ganeti import uidpool
54
from ganeti import compat
55
from ganeti import masterd
56
from ganeti import netutils
57
from ganeti import query
58
from ganeti import qlang
59
from ganeti import opcodes
60
from ganeti import ht
61

    
62
import ganeti.masterd.instance # pylint: disable-msg=W0611
63

    
64

    
65
def _SupportsOob(cfg, node):
66
  """Tells if node supports OOB.
67

68
  @type cfg: L{config.ConfigWriter}
69
  @param cfg: The cluster configuration
70
  @type node: L{objects.Node}
71
  @param node: The node
72
  @return: The OOB script if supported or an empty string otherwise
73

74
  """
75
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
76

    
77

    
78
class ResultWithJobs:
79
  """Data container for LU results with jobs.
80

81
  Instances of this class returned from L{LogicalUnit.Exec} will be recognized
82
  by L{mcpu.Processor._ProcessResult}. The latter will then submit the jobs
83
  contained in the C{jobs} attribute and include the job IDs in the opcode
84
  result.
85

86
  """
87
  def __init__(self, jobs, **kwargs):
88
    """Initializes this class.
89

90
    Additional return values can be specified as keyword arguments.
91

92
    @type jobs: list of lists of L{opcode.OpCode}
93
    @param jobs: A list of lists of opcode objects
94

95
    """
96
    self.jobs = jobs
97
    self.other = kwargs
98

    
99

    
100
class LogicalUnit(object):
101
  """Logical Unit base class.
102

103
  Subclasses must follow these rules:
104
    - implement ExpandNames
105
    - implement CheckPrereq (except when tasklets are used)
106
    - implement Exec (except when tasklets are used)
107
    - implement BuildHooksEnv
108
    - implement BuildHooksNodes
109
    - redefine HPATH and HTYPE
110
    - optionally redefine their run requirements:
111
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
112

113
  Note that all commands require root permissions.
114

115
  @ivar dry_run_result: the value (if any) that will be returned to the caller
116
      in dry-run mode (signalled by opcode dry_run parameter)
117

118
  """
119
  HPATH = None
120
  HTYPE = None
121
  REQ_BGL = True
122

    
123
  def __init__(self, processor, op, context, rpc):
124
    """Constructor for LogicalUnit.
125

126
    This needs to be overridden in derived classes in order to check op
127
    validity.
128

129
    """
130
    self.proc = processor
131
    self.op = op
132
    self.cfg = context.cfg
133
    self.glm = context.glm
134
    self.context = context
135
    self.rpc = rpc
136
    # Dicts used to declare locking needs to mcpu
137
    self.needed_locks = None
138
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
139
    self.add_locks = {}
140
    self.remove_locks = {}
141
    # Used to force good behavior when calling helper functions
142
    self.recalculate_locks = {}
143
    # logging
144
    self.Log = processor.Log # pylint: disable-msg=C0103
145
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
146
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
147
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
148
    # support for dry-run
149
    self.dry_run_result = None
150
    # support for generic debug attribute
151
    if (not hasattr(self.op, "debug_level") or
152
        not isinstance(self.op.debug_level, int)):
153
      self.op.debug_level = 0
154

    
155
    # Tasklets
156
    self.tasklets = None
157

    
158
    # Validate opcode parameters and set defaults
159
    self.op.Validate(True)
160

    
161
    self.CheckArguments()
162

    
163
  def CheckArguments(self):
164
    """Check syntactic validity for the opcode arguments.
165

166
    This method is for doing a simple syntactic check and ensure
167
    validity of opcode parameters, without any cluster-related
168
    checks. While the same can be accomplished in ExpandNames and/or
169
    CheckPrereq, doing these separate is better because:
170

171
      - ExpandNames is left as as purely a lock-related function
172
      - CheckPrereq is run after we have acquired locks (and possible
173
        waited for them)
174

175
    The function is allowed to change the self.op attribute so that
176
    later methods can no longer worry about missing parameters.
177

178
    """
179
    pass
180

    
181
  def ExpandNames(self):
182
    """Expand names for this LU.
183

184
    This method is called before starting to execute the opcode, and it should
185
    update all the parameters of the opcode to their canonical form (e.g. a
186
    short node name must be fully expanded after this method has successfully
187
    completed). This way locking, hooks, logging, etc. can work correctly.
188

189
    LUs which implement this method must also populate the self.needed_locks
190
    member, as a dict with lock levels as keys, and a list of needed lock names
191
    as values. Rules:
192

193
      - use an empty dict if you don't need any lock
194
      - if you don't need any lock at a particular level omit that level
195
      - don't put anything for the BGL level
196
      - if you want all locks at a level use locking.ALL_SET as a value
197

198
    If you need to share locks (rather than acquire them exclusively) at one
199
    level you can modify self.share_locks, setting a true value (usually 1) for
200
    that level. By default locks are not shared.
201

202
    This function can also define a list of tasklets, which then will be
203
    executed in order instead of the usual LU-level CheckPrereq and Exec
204
    functions, if those are not defined by the LU.
205

206
    Examples::
207

208
      # Acquire all nodes and one instance
209
      self.needed_locks = {
210
        locking.LEVEL_NODE: locking.ALL_SET,
211
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
212
      }
213
      # Acquire just two nodes
214
      self.needed_locks = {
215
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
216
      }
217
      # Acquire no locks
218
      self.needed_locks = {} # No, you can't leave it to the default value None
219

220
    """
221
    # The implementation of this method is mandatory only if the new LU is
222
    # concurrent, so that old LUs don't need to be changed all at the same
223
    # time.
224
    if self.REQ_BGL:
225
      self.needed_locks = {} # Exclusive LUs don't need locks.
226
    else:
227
      raise NotImplementedError
228

    
229
  def DeclareLocks(self, level):
230
    """Declare LU locking needs for a level
231

232
    While most LUs can just declare their locking needs at ExpandNames time,
233
    sometimes there's the need to calculate some locks after having acquired
234
    the ones before. This function is called just before acquiring locks at a
235
    particular level, but after acquiring the ones at lower levels, and permits
236
    such calculations. It can be used to modify self.needed_locks, and by
237
    default it does nothing.
238

239
    This function is only called if you have something already set in
240
    self.needed_locks for the level.
241

242
    @param level: Locking level which is going to be locked
243
    @type level: member of ganeti.locking.LEVELS
244

245
    """
246

    
247
  def CheckPrereq(self):
248
    """Check prerequisites for this LU.
249

250
    This method should check that the prerequisites for the execution
251
    of this LU are fulfilled. It can do internode communication, but
252
    it should be idempotent - no cluster or system changes are
253
    allowed.
254

255
    The method should raise errors.OpPrereqError in case something is
256
    not fulfilled. Its return value is ignored.
257

258
    This method should also update all the parameters of the opcode to
259
    their canonical form if it hasn't been done by ExpandNames before.
260

261
    """
262
    if self.tasklets is not None:
263
      for (idx, tl) in enumerate(self.tasklets):
264
        logging.debug("Checking prerequisites for tasklet %s/%s",
265
                      idx + 1, len(self.tasklets))
266
        tl.CheckPrereq()
267
    else:
268
      pass
269

    
270
  def Exec(self, feedback_fn):
271
    """Execute the LU.
272

273
    This method should implement the actual work. It should raise
274
    errors.OpExecError for failures that are somewhat dealt with in
275
    code, or expected.
276

277
    """
278
    if self.tasklets is not None:
279
      for (idx, tl) in enumerate(self.tasklets):
280
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
281
        tl.Exec(feedback_fn)
282
    else:
283
      raise NotImplementedError
284

    
285
  def BuildHooksEnv(self):
286
    """Build hooks environment for this LU.
287

288
    @rtype: dict
289
    @return: Dictionary containing the environment that will be used for
290
      running the hooks for this LU. The keys of the dict must not be prefixed
291
      with "GANETI_"--that'll be added by the hooks runner. The hooks runner
292
      will extend the environment with additional variables. If no environment
293
      should be defined, an empty dictionary should be returned (not C{None}).
294
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
295
      will not be called.
296

297
    """
298
    raise NotImplementedError
299

    
300
  def BuildHooksNodes(self):
301
    """Build list of nodes to run LU's hooks.
302

303
    @rtype: tuple; (list, list)
304
    @return: Tuple containing a list of node names on which the hook
305
      should run before the execution and a list of node names on which the
306
      hook should run after the execution. No nodes should be returned as an
307
      empty list (and not None).
308
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
309
      will not be called.
310

311
    """
312
    raise NotImplementedError
313

    
314
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
315
    """Notify the LU about the results of its hooks.
316

317
    This method is called every time a hooks phase is executed, and notifies
318
    the Logical Unit about the hooks' result. The LU can then use it to alter
319
    its result based on the hooks.  By default the method does nothing and the
320
    previous result is passed back unchanged but any LU can define it if it
321
    wants to use the local cluster hook-scripts somehow.
322

323
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
324
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
325
    @param hook_results: the results of the multi-node hooks rpc call
326
    @param feedback_fn: function used send feedback back to the caller
327
    @param lu_result: the previous Exec result this LU had, or None
328
        in the PRE phase
329
    @return: the new Exec result, based on the previous result
330
        and hook results
331

332
    """
333
    # API must be kept, thus we ignore the unused argument and could
334
    # be a function warnings
335
    # pylint: disable-msg=W0613,R0201
336
    return lu_result
337

    
338
  def _ExpandAndLockInstance(self):
339
    """Helper function to expand and lock an instance.
340

341
    Many LUs that work on an instance take its name in self.op.instance_name
342
    and need to expand it and then declare the expanded name for locking. This
343
    function does it, and then updates self.op.instance_name to the expanded
344
    name. It also initializes needed_locks as a dict, if this hasn't been done
345
    before.
346

347
    """
348
    if self.needed_locks is None:
349
      self.needed_locks = {}
350
    else:
351
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
352
        "_ExpandAndLockInstance called with instance-level locks set"
353
    self.op.instance_name = _ExpandInstanceName(self.cfg,
354
                                                self.op.instance_name)
355
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
356

    
357
  def _LockInstancesNodes(self, primary_only=False):
358
    """Helper function to declare instances' nodes for locking.
359

360
    This function should be called after locking one or more instances to lock
361
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
362
    with all primary or secondary nodes for instances already locked and
363
    present in self.needed_locks[locking.LEVEL_INSTANCE].
364

365
    It should be called from DeclareLocks, and for safety only works if
366
    self.recalculate_locks[locking.LEVEL_NODE] is set.
367

368
    In the future it may grow parameters to just lock some instance's nodes, or
369
    to just lock primaries or secondary nodes, if needed.
370

371
    If should be called in DeclareLocks in a way similar to::
372

373
      if level == locking.LEVEL_NODE:
374
        self._LockInstancesNodes()
375

376
    @type primary_only: boolean
377
    @param primary_only: only lock primary nodes of locked instances
378

379
    """
380
    assert locking.LEVEL_NODE in self.recalculate_locks, \
381
      "_LockInstancesNodes helper function called with no nodes to recalculate"
382

    
383
    # TODO: check if we're really been called with the instance locks held
384

    
385
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
386
    # future we might want to have different behaviors depending on the value
387
    # of self.recalculate_locks[locking.LEVEL_NODE]
388
    wanted_nodes = []
389
    for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
390
      instance = self.context.cfg.GetInstanceInfo(instance_name)
391
      wanted_nodes.append(instance.primary_node)
392
      if not primary_only:
393
        wanted_nodes.extend(instance.secondary_nodes)
394

    
395
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
396
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
397
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
398
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
399

    
400
    del self.recalculate_locks[locking.LEVEL_NODE]
401

    
402

    
403
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
404
  """Simple LU which runs no hooks.
405

406
  This LU is intended as a parent for other LogicalUnits which will
407
  run no hooks, in order to reduce duplicate code.
408

409
  """
410
  HPATH = None
411
  HTYPE = None
412

    
413
  def BuildHooksEnv(self):
414
    """Empty BuildHooksEnv for NoHooksLu.
415

416
    This just raises an error.
417

418
    """
419
    raise AssertionError("BuildHooksEnv called for NoHooksLUs")
420

    
421
  def BuildHooksNodes(self):
422
    """Empty BuildHooksNodes for NoHooksLU.
423

424
    """
425
    raise AssertionError("BuildHooksNodes called for NoHooksLU")
426

    
427

    
428
class Tasklet:
429
  """Tasklet base class.
430

431
  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
432
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
433
  tasklets know nothing about locks.
434

435
  Subclasses must follow these rules:
436
    - Implement CheckPrereq
437
    - Implement Exec
438

439
  """
440
  def __init__(self, lu):
441
    self.lu = lu
442

    
443
    # Shortcuts
444
    self.cfg = lu.cfg
445
    self.rpc = lu.rpc
446

    
447
  def CheckPrereq(self):
448
    """Check prerequisites for this tasklets.
449

450
    This method should check whether the prerequisites for the execution of
451
    this tasklet are fulfilled. It can do internode communication, but it
452
    should be idempotent - no cluster or system changes are allowed.
453

454
    The method should raise errors.OpPrereqError in case something is not
455
    fulfilled. Its return value is ignored.
456

457
    This method should also update all parameters to their canonical form if it
458
    hasn't been done before.
459

460
    """
461
    pass
462

    
463
  def Exec(self, feedback_fn):
464
    """Execute the tasklet.
465

466
    This method should implement the actual work. It should raise
467
    errors.OpExecError for failures that are somewhat dealt with in code, or
468
    expected.
469

470
    """
471
    raise NotImplementedError
472

    
473

    
474
class _QueryBase:
475
  """Base for query utility classes.
476

477
  """
478
  #: Attribute holding field definitions
479
  FIELDS = None
480

    
481
  def __init__(self, filter_, fields, use_locking):
482
    """Initializes this class.
483

484
    """
485
    self.use_locking = use_locking
486

    
487
    self.query = query.Query(self.FIELDS, fields, filter_=filter_,
488
                             namefield="name")
489
    self.requested_data = self.query.RequestedData()
490
    self.names = self.query.RequestedNames()
491

    
492
    # Sort only if no names were requested
493
    self.sort_by_name = not self.names
494

    
495
    self.do_locking = None
496
    self.wanted = None
497

    
498
  def _GetNames(self, lu, all_names, lock_level):
499
    """Helper function to determine names asked for in the query.
500

501
    """
502
    if self.do_locking:
503
      names = lu.glm.list_owned(lock_level)
504
    else:
505
      names = all_names
506

    
507
    if self.wanted == locking.ALL_SET:
508
      assert not self.names
509
      # caller didn't specify names, so ordering is not important
510
      return utils.NiceSort(names)
511

    
512
    # caller specified names and we must keep the same order
513
    assert self.names
514
    assert not self.do_locking or lu.glm.is_owned(lock_level)
515

    
516
    missing = set(self.wanted).difference(names)
517
    if missing:
518
      raise errors.OpExecError("Some items were removed before retrieving"
519
                               " their data: %s" % missing)
520

    
521
    # Return expanded names
522
    return self.wanted
523

    
524
  def ExpandNames(self, lu):
525
    """Expand names for this query.
526

527
    See L{LogicalUnit.ExpandNames}.
528

529
    """
530
    raise NotImplementedError()
531

    
532
  def DeclareLocks(self, lu, level):
533
    """Declare locks for this query.
534

535
    See L{LogicalUnit.DeclareLocks}.
536

537
    """
538
    raise NotImplementedError()
539

    
540
  def _GetQueryData(self, lu):
541
    """Collects all data for this query.
542

543
    @return: Query data object
544

545
    """
546
    raise NotImplementedError()
547

    
548
  def NewStyleQuery(self, lu):
549
    """Collect data and execute query.
550

551
    """
552
    return query.GetQueryResponse(self.query, self._GetQueryData(lu),
553
                                  sort_by_name=self.sort_by_name)
554

    
555
  def OldStyleQuery(self, lu):
556
    """Collect data and execute query.
557

558
    """
559
    return self.query.OldStyleQuery(self._GetQueryData(lu),
560
                                    sort_by_name=self.sort_by_name)
561

    
562

    
563
def _GetWantedNodes(lu, nodes):
564
  """Returns list of checked and expanded node names.
565

566
  @type lu: L{LogicalUnit}
567
  @param lu: the logical unit on whose behalf we execute
568
  @type nodes: list
569
  @param nodes: list of node names or None for all nodes
570
  @rtype: list
571
  @return: the list of nodes, sorted
572
  @raise errors.ProgrammerError: if the nodes parameter is wrong type
573

574
  """
575
  if nodes:
576
    return [_ExpandNodeName(lu.cfg, name) for name in nodes]
577

    
578
  return utils.NiceSort(lu.cfg.GetNodeList())
579

    
580

    
581
def _GetWantedInstances(lu, instances):
582
  """Returns list of checked and expanded instance names.
583

584
  @type lu: L{LogicalUnit}
585
  @param lu: the logical unit on whose behalf we execute
586
  @type instances: list
587
  @param instances: list of instance names or None for all instances
588
  @rtype: list
589
  @return: the list of instances, sorted
590
  @raise errors.OpPrereqError: if the instances parameter is wrong type
591
  @raise errors.OpPrereqError: if any of the passed instances is not found
592

593
  """
594
  if instances:
595
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
596
  else:
597
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
598
  return wanted
599

    
600

    
601
def _GetUpdatedParams(old_params, update_dict,
602
                      use_default=True, use_none=False):
603
  """Return the new version of a parameter dictionary.
604

605
  @type old_params: dict
606
  @param old_params: old parameters
607
  @type update_dict: dict
608
  @param update_dict: dict containing new parameter values, or
609
      constants.VALUE_DEFAULT to reset the parameter to its default
610
      value
611
  @param use_default: boolean
612
  @type use_default: whether to recognise L{constants.VALUE_DEFAULT}
613
      values as 'to be deleted' values
614
  @param use_none: boolean
615
  @type use_none: whether to recognise C{None} values as 'to be
616
      deleted' values
617
  @rtype: dict
618
  @return: the new parameter dictionary
619

620
  """
621
  params_copy = copy.deepcopy(old_params)
622
  for key, val in update_dict.iteritems():
623
    if ((use_default and val == constants.VALUE_DEFAULT) or
624
        (use_none and val is None)):
625
      try:
626
        del params_copy[key]
627
      except KeyError:
628
        pass
629
    else:
630
      params_copy[key] = val
631
  return params_copy
632

    
633

    
634
def _ReleaseLocks(lu, level, names=None, keep=None):
635
  """Releases locks owned by an LU.
636

637
  @type lu: L{LogicalUnit}
638
  @param level: Lock level
639
  @type names: list or None
640
  @param names: Names of locks to release
641
  @type keep: list or None
642
  @param keep: Names of locks to retain
643

644
  """
645
  assert not (keep is not None and names is not None), \
646
         "Only one of the 'names' and the 'keep' parameters can be given"
647

    
648
  if names is not None:
649
    should_release = names.__contains__
650
  elif keep:
651
    should_release = lambda name: name not in keep
652
  else:
653
    should_release = None
654

    
655
  if should_release:
656
    retain = []
657
    release = []
658

    
659
    # Determine which locks to release
660
    for name in lu.glm.list_owned(level):
661
      if should_release(name):
662
        release.append(name)
663
      else:
664
        retain.append(name)
665

    
666
    assert len(lu.glm.list_owned(level)) == (len(retain) + len(release))
667

    
668
    # Release just some locks
669
    lu.glm.release(level, names=release)
670

    
671
    assert frozenset(lu.glm.list_owned(level)) == frozenset(retain)
672
  else:
673
    # Release everything
674
    lu.glm.release(level)
675

    
676
    assert not lu.glm.is_owned(level), "No locks should be owned"
677

    
678

    
679
def _RunPostHook(lu, node_name):
680
  """Runs the post-hook for an opcode on a single node.
681

682
  """
683
  hm = lu.proc.hmclass(lu.rpc.call_hooks_runner, lu)
684
  try:
685
    hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
686
  except:
687
    # pylint: disable-msg=W0702
688
    lu.LogWarning("Errors occurred running hooks on %s" % node_name)
689

    
690

    
691
def _CheckOutputFields(static, dynamic, selected):
692
  """Checks whether all selected fields are valid.
693

694
  @type static: L{utils.FieldSet}
695
  @param static: static fields set
696
  @type dynamic: L{utils.FieldSet}
697
  @param dynamic: dynamic fields set
698

699
  """
700
  f = utils.FieldSet()
701
  f.Extend(static)
702
  f.Extend(dynamic)
703

    
704
  delta = f.NonMatching(selected)
705
  if delta:
706
    raise errors.OpPrereqError("Unknown output fields selected: %s"
707
                               % ",".join(delta), errors.ECODE_INVAL)
708

    
709

    
710
def _CheckGlobalHvParams(params):
711
  """Validates that given hypervisor params are not global ones.
712

713
  This will ensure that instances don't get customised versions of
714
  global params.
715

716
  """
717
  used_globals = constants.HVC_GLOBALS.intersection(params)
718
  if used_globals:
719
    msg = ("The following hypervisor parameters are global and cannot"
720
           " be customized at instance level, please modify them at"
721
           " cluster level: %s" % utils.CommaJoin(used_globals))
722
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
723

    
724

    
725
def _CheckNodeOnline(lu, node, msg=None):
726
  """Ensure that a given node is online.
727

728
  @param lu: the LU on behalf of which we make the check
729
  @param node: the node to check
730
  @param msg: if passed, should be a message to replace the default one
731
  @raise errors.OpPrereqError: if the node is offline
732

733
  """
734
  if msg is None:
735
    msg = "Can't use offline node"
736
  if lu.cfg.GetNodeInfo(node).offline:
737
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)
738

    
739

    
740
def _CheckNodeNotDrained(lu, node):
741
  """Ensure that a given node is not drained.
742

743
  @param lu: the LU on behalf of which we make the check
744
  @param node: the node to check
745
  @raise errors.OpPrereqError: if the node is drained
746

747
  """
748
  if lu.cfg.GetNodeInfo(node).drained:
749
    raise errors.OpPrereqError("Can't use drained node %s" % node,
750
                               errors.ECODE_STATE)
751

    
752

    
753
def _CheckNodeVmCapable(lu, node):
754
  """Ensure that a given node is vm capable.
755

756
  @param lu: the LU on behalf of which we make the check
757
  @param node: the node to check
758
  @raise errors.OpPrereqError: if the node is not vm capable
759

760
  """
761
  if not lu.cfg.GetNodeInfo(node).vm_capable:
762
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
763
                               errors.ECODE_STATE)
764

    
765

    
766
def _CheckNodeHasOS(lu, node, os_name, force_variant):
767
  """Ensure that a node supports a given OS.
768

769
  @param lu: the LU on behalf of which we make the check
770
  @param node: the node to check
771
  @param os_name: the OS to query about
772
  @param force_variant: whether to ignore variant errors
773
  @raise errors.OpPrereqError: if the node is not supporting the OS
774

775
  """
776
  result = lu.rpc.call_os_get(node, os_name)
777
  result.Raise("OS '%s' not in supported OS list for node %s" %
778
               (os_name, node),
779
               prereq=True, ecode=errors.ECODE_INVAL)
780
  if not force_variant:
781
    _CheckOSVariant(result.payload, os_name)
782

    
783

    
784
def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
785
  """Ensure that a node has the given secondary ip.
786

787
  @type lu: L{LogicalUnit}
788
  @param lu: the LU on behalf of which we make the check
789
  @type node: string
790
  @param node: the node to check
791
  @type secondary_ip: string
792
  @param secondary_ip: the ip to check
793
  @type prereq: boolean
794
  @param prereq: whether to throw a prerequisite or an execute error
795
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
796
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
797

798
  """
799
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
800
  result.Raise("Failure checking secondary ip on node %s" % node,
801
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
802
  if not result.payload:
803
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
804
           " please fix and re-run this command" % secondary_ip)
805
    if prereq:
806
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
807
    else:
808
      raise errors.OpExecError(msg)
809

    
810

    
811
def _GetClusterDomainSecret():
812
  """Reads the cluster domain secret.
813

814
  """
815
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
816
                               strict=True)
817

    
818

    
819
def _CheckInstanceDown(lu, instance, reason):
820
  """Ensure that an instance is not running."""
821
  if instance.admin_up:
822
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
823
                               (instance.name, reason), errors.ECODE_STATE)
824

    
825
  pnode = instance.primary_node
826
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
827
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
828
              prereq=True, ecode=errors.ECODE_ENVIRON)
829

    
830
  if instance.name in ins_l.payload:
831
    raise errors.OpPrereqError("Instance %s is running, %s" %
832
                               (instance.name, reason), errors.ECODE_STATE)
833

    
834

    
835
def _ExpandItemName(fn, name, kind):
836
  """Expand an item name.
837

838
  @param fn: the function to use for expansion
839
  @param name: requested item name
840
  @param kind: text description ('Node' or 'Instance')
841
  @return: the resolved (full) name
842
  @raise errors.OpPrereqError: if the item is not found
843

844
  """
845
  full_name = fn(name)
846
  if full_name is None:
847
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
848
                               errors.ECODE_NOENT)
849
  return full_name
850

    
851

    
852
def _ExpandNodeName(cfg, name):
853
  """Wrapper over L{_ExpandItemName} for nodes."""
854
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
855

    
856

    
857
def _ExpandInstanceName(cfg, name):
858
  """Wrapper over L{_ExpandItemName} for instance."""
859
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
860

    
861

    
862
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
863
                          memory, vcpus, nics, disk_template, disks,
864
                          bep, hvp, hypervisor_name):
865
  """Builds instance related env variables for hooks
866

867
  This builds the hook environment from individual variables.
868

869
  @type name: string
870
  @param name: the name of the instance
871
  @type primary_node: string
872
  @param primary_node: the name of the instance's primary node
873
  @type secondary_nodes: list
874
  @param secondary_nodes: list of secondary nodes as strings
875
  @type os_type: string
876
  @param os_type: the name of the instance's OS
877
  @type status: boolean
878
  @param status: the should_run status of the instance
879
  @type memory: string
880
  @param memory: the memory size of the instance
881
  @type vcpus: string
882
  @param vcpus: the count of VCPUs the instance has
883
  @type nics: list
884
  @param nics: list of tuples (ip, mac, mode, link) representing
885
      the NICs the instance has
886
  @type disk_template: string
887
  @param disk_template: the disk template of the instance
888
  @type disks: list
889
  @param disks: the list of (size, mode) pairs
890
  @type bep: dict
891
  @param bep: the backend parameters for the instance
892
  @type hvp: dict
893
  @param hvp: the hypervisor parameters for the instance
894
  @type hypervisor_name: string
895
  @param hypervisor_name: the hypervisor for the instance
896
  @rtype: dict
897
  @return: the hook environment for this instance
898

899
  """
900
  if status:
901
    str_status = "up"
902
  else:
903
    str_status = "down"
904
  env = {
905
    "OP_TARGET": name,
906
    "INSTANCE_NAME": name,
907
    "INSTANCE_PRIMARY": primary_node,
908
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
909
    "INSTANCE_OS_TYPE": os_type,
910
    "INSTANCE_STATUS": str_status,
911
    "INSTANCE_MEMORY": memory,
912
    "INSTANCE_VCPUS": vcpus,
913
    "INSTANCE_DISK_TEMPLATE": disk_template,
914
    "INSTANCE_HYPERVISOR": hypervisor_name,
915
  }
916

    
917
  if nics:
918
    nic_count = len(nics)
919
    for idx, (ip, mac, mode, link) in enumerate(nics):
920
      if ip is None:
921
        ip = ""
922
      env["INSTANCE_NIC%d_IP" % idx] = ip
923
      env["INSTANCE_NIC%d_MAC" % idx] = mac
924
      env["INSTANCE_NIC%d_MODE" % idx] = mode
925
      env["INSTANCE_NIC%d_LINK" % idx] = link
926
      if mode == constants.NIC_MODE_BRIDGED:
927
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
928
  else:
929
    nic_count = 0
930

    
931
  env["INSTANCE_NIC_COUNT"] = nic_count
932

    
933
  if disks:
934
    disk_count = len(disks)
935
    for idx, (size, mode) in enumerate(disks):
936
      env["INSTANCE_DISK%d_SIZE" % idx] = size
937
      env["INSTANCE_DISK%d_MODE" % idx] = mode
938
  else:
939
    disk_count = 0
940

    
941
  env["INSTANCE_DISK_COUNT"] = disk_count
942

    
943
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
944
    for key, value in source.items():
945
      env["INSTANCE_%s_%s" % (kind, key)] = value
946

    
947
  return env
948

    
949

    
950
def _NICListToTuple(lu, nics):
951
  """Build a list of nic information tuples.
952

953
  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
954
  value in LUInstanceQueryData.
955

956
  @type lu:  L{LogicalUnit}
957
  @param lu: the logical unit on whose behalf we execute
958
  @type nics: list of L{objects.NIC}
959
  @param nics: list of nics to convert to hooks tuples
960

961
  """
962
  hooks_nics = []
963
  cluster = lu.cfg.GetClusterInfo()
964
  for nic in nics:
965
    ip = nic.ip
966
    mac = nic.mac
967
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
968
    mode = filled_params[constants.NIC_MODE]
969
    link = filled_params[constants.NIC_LINK]
970
    hooks_nics.append((ip, mac, mode, link))
971
  return hooks_nics
972

    
973

    
974
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
975
  """Builds instance related env variables for hooks from an object.
976

977
  @type lu: L{LogicalUnit}
978
  @param lu: the logical unit on whose behalf we execute
979
  @type instance: L{objects.Instance}
980
  @param instance: the instance for which we should build the
981
      environment
982
  @type override: dict
983
  @param override: dictionary with key/values that will override
984
      our values
985
  @rtype: dict
986
  @return: the hook environment dictionary
987

988
  """
989
  cluster = lu.cfg.GetClusterInfo()
990
  bep = cluster.FillBE(instance)
991
  hvp = cluster.FillHV(instance)
992
  args = {
993
    'name': instance.name,
994
    'primary_node': instance.primary_node,
995
    'secondary_nodes': instance.secondary_nodes,
996
    'os_type': instance.os,
997
    'status': instance.admin_up,
998
    'memory': bep[constants.BE_MEMORY],
999
    'vcpus': bep[constants.BE_VCPUS],
1000
    'nics': _NICListToTuple(lu, instance.nics),
1001
    'disk_template': instance.disk_template,
1002
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
1003
    'bep': bep,
1004
    'hvp': hvp,
1005
    'hypervisor_name': instance.hypervisor,
1006
  }
1007
  if override:
1008
    args.update(override)
1009
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
1010

    
1011

    
1012
def _AdjustCandidatePool(lu, exceptions):
1013
  """Adjust the candidate pool after node operations.
1014

1015
  """
1016
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
1017
  if mod_list:
1018
    lu.LogInfo("Promoted nodes to master candidate role: %s",
1019
               utils.CommaJoin(node.name for node in mod_list))
1020
    for name in mod_list:
1021
      lu.context.ReaddNode(name)
1022
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1023
  if mc_now > mc_max:
1024
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
1025
               (mc_now, mc_max))
1026

    
1027

    
1028
def _DecideSelfPromotion(lu, exceptions=None):
1029
  """Decide whether I should promote myself as a master candidate.
1030

1031
  """
1032
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
1033
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
1034
  # the new node will increase mc_max with one, so:
1035
  mc_should = min(mc_should + 1, cp_size)
1036
  return mc_now < mc_should
1037

    
1038

    
1039
def _CheckNicsBridgesExist(lu, target_nics, target_node):
1040
  """Check that the brigdes needed by a list of nics exist.
1041

1042
  """
1043
  cluster = lu.cfg.GetClusterInfo()
1044
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
1045
  brlist = [params[constants.NIC_LINK] for params in paramslist
1046
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
1047
  if brlist:
1048
    result = lu.rpc.call_bridges_exist(target_node, brlist)
1049
    result.Raise("Error checking bridges on destination node '%s'" %
1050
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
1051

    
1052

    
1053
def _CheckInstanceBridgesExist(lu, instance, node=None):
1054
  """Check that the brigdes needed by an instance exist.
1055

1056
  """
1057
  if node is None:
1058
    node = instance.primary_node
1059
  _CheckNicsBridgesExist(lu, instance.nics, node)
1060

    
1061

    
1062
def _CheckOSVariant(os_obj, name):
1063
  """Check whether an OS name conforms to the os variants specification.
1064

1065
  @type os_obj: L{objects.OS}
1066
  @param os_obj: OS object to check
1067
  @type name: string
1068
  @param name: OS name passed by the user, to check for validity
1069

1070
  """
1071
  if not os_obj.supported_variants:
1072
    return
1073
  variant = objects.OS.GetVariant(name)
1074
  if not variant:
1075
    raise errors.OpPrereqError("OS name must include a variant",
1076
                               errors.ECODE_INVAL)
1077

    
1078
  if variant not in os_obj.supported_variants:
1079
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
1080

    
1081

    
1082
def _GetNodeInstancesInner(cfg, fn):
1083
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
1084

    
1085

    
1086
def _GetNodeInstances(cfg, node_name):
1087
  """Returns a list of all primary and secondary instances on a node.
1088

1089
  """
1090

    
1091
  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
1092

    
1093

    
1094
def _GetNodePrimaryInstances(cfg, node_name):
1095
  """Returns primary instances on a node.
1096

1097
  """
1098
  return _GetNodeInstancesInner(cfg,
1099
                                lambda inst: node_name == inst.primary_node)
1100

    
1101

    
1102
def _GetNodeSecondaryInstances(cfg, node_name):
1103
  """Returns secondary instances on a node.
1104

1105
  """
1106
  return _GetNodeInstancesInner(cfg,
1107
                                lambda inst: node_name in inst.secondary_nodes)
1108

    
1109

    
1110
def _GetStorageTypeArgs(cfg, storage_type):
1111
  """Returns the arguments for a storage type.
1112

1113
  """
1114
  # Special case for file storage
1115
  if storage_type == constants.ST_FILE:
1116
    # storage.FileStorage wants a list of storage directories
1117
    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]
1118

    
1119
  return []
1120

    
1121

    
1122
def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
1123
  faulty = []
1124

    
1125
  for dev in instance.disks:
1126
    cfg.SetDiskID(dev, node_name)
1127

    
1128
  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
1129
  result.Raise("Failed to get disk status from node %s" % node_name,
1130
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
1131

    
1132
  for idx, bdev_status in enumerate(result.payload):
1133
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
1134
      faulty.append(idx)
1135

    
1136
  return faulty
1137

    
1138

    
1139
def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1140
  """Check the sanity of iallocator and node arguments and use the
1141
  cluster-wide iallocator if appropriate.
1142

1143
  Check that at most one of (iallocator, node) is specified. If none is
1144
  specified, then the LU's opcode's iallocator slot is filled with the
1145
  cluster-wide default iallocator.
1146

1147
  @type iallocator_slot: string
1148
  @param iallocator_slot: the name of the opcode iallocator slot
1149
  @type node_slot: string
1150
  @param node_slot: the name of the opcode target node slot
1151

1152
  """
1153
  node = getattr(lu.op, node_slot, None)
1154
  iallocator = getattr(lu.op, iallocator_slot, None)
1155

    
1156
  if node is not None and iallocator is not None:
1157
    raise errors.OpPrereqError("Do not specify both, iallocator and node",
1158
                               errors.ECODE_INVAL)
1159
  elif node is None and iallocator is None:
1160
    default_iallocator = lu.cfg.GetDefaultIAllocator()
1161
    if default_iallocator:
1162
      setattr(lu.op, iallocator_slot, default_iallocator)
1163
    else:
1164
      raise errors.OpPrereqError("No iallocator or node given and no"
1165
                                 " cluster-wide default iallocator found;"
1166
                                 " please specify either an iallocator or a"
1167
                                 " node, or set a cluster-wide default"
1168
                                 " iallocator")
1169

    
1170

    
1171
class LUClusterPostInit(LogicalUnit):
1172
  """Logical unit for running hooks after cluster initialization.
1173

1174
  """
1175
  HPATH = "cluster-init"
1176
  HTYPE = constants.HTYPE_CLUSTER
1177

    
1178
  def BuildHooksEnv(self):
1179
    """Build hooks env.
1180

1181
    """
1182
    return {
1183
      "OP_TARGET": self.cfg.GetClusterName(),
1184
      }
1185

    
1186
  def BuildHooksNodes(self):
1187
    """Build hooks nodes.
1188

1189
    """
1190
    return ([], [self.cfg.GetMasterNode()])
1191

    
1192
  def Exec(self, feedback_fn):
1193
    """Nothing to do.
1194

1195
    """
1196
    return True
1197

    
1198

    
1199
class LUClusterDestroy(LogicalUnit):
1200
  """Logical unit for destroying the cluster.
1201

1202
  """
1203
  HPATH = "cluster-destroy"
1204
  HTYPE = constants.HTYPE_CLUSTER
1205

    
1206
  def BuildHooksEnv(self):
1207
    """Build hooks env.
1208

1209
    """
1210
    return {
1211
      "OP_TARGET": self.cfg.GetClusterName(),
1212
      }
1213

    
1214
  def BuildHooksNodes(self):
1215
    """Build hooks nodes.
1216

1217
    """
1218
    return ([], [])
1219

    
1220
  def CheckPrereq(self):
1221
    """Check prerequisites.
1222

1223
    This checks whether the cluster is empty.
1224

1225
    Any errors are signaled by raising errors.OpPrereqError.
1226

1227
    """
1228
    master = self.cfg.GetMasterNode()
1229

    
1230
    nodelist = self.cfg.GetNodeList()
1231
    if len(nodelist) != 1 or nodelist[0] != master:
1232
      raise errors.OpPrereqError("There are still %d node(s) in"
1233
                                 " this cluster." % (len(nodelist) - 1),
1234
                                 errors.ECODE_INVAL)
1235
    instancelist = self.cfg.GetInstanceList()
1236
    if instancelist:
1237
      raise errors.OpPrereqError("There are still %d instance(s) in"
1238
                                 " this cluster." % len(instancelist),
1239
                                 errors.ECODE_INVAL)
1240

    
1241
  def Exec(self, feedback_fn):
1242
    """Destroys the cluster.
1243

1244
    """
1245
    master = self.cfg.GetMasterNode()
1246

    
1247
    # Run post hooks on master node before it's removed
1248
    _RunPostHook(self, master)
1249

    
1250
    result = self.rpc.call_node_stop_master(master, False)
1251
    result.Raise("Could not disable the master role")
1252

    
1253
    return master
1254

    
1255

    
1256
def _VerifyCertificate(filename):
1257
  """Verifies a certificate for LUClusterVerify.
1258

1259
  @type filename: string
1260
  @param filename: Path to PEM file
1261

1262
  """
1263
  try:
1264
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1265
                                           utils.ReadFile(filename))
1266
  except Exception, err: # pylint: disable-msg=W0703
1267
    return (LUClusterVerify.ETYPE_ERROR,
1268
            "Failed to load X509 certificate %s: %s" % (filename, err))
1269

    
1270
  (errcode, msg) = \
1271
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1272
                                constants.SSL_CERT_EXPIRATION_ERROR)
1273

    
1274
  if msg:
1275
    fnamemsg = "While verifying %s: %s" % (filename, msg)
1276
  else:
1277
    fnamemsg = None
1278

    
1279
  if errcode is None:
1280
    return (None, fnamemsg)
1281
  elif errcode == utils.CERT_WARNING:
1282
    return (LUClusterVerify.ETYPE_WARNING, fnamemsg)
1283
  elif errcode == utils.CERT_ERROR:
1284
    return (LUClusterVerify.ETYPE_ERROR, fnamemsg)
1285

    
1286
  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1287

    
1288

    
1289
class LUClusterVerify(LogicalUnit):
1290
  """Verifies the cluster status.
1291

1292
  """
1293
  HPATH = "cluster-verify"
1294
  HTYPE = constants.HTYPE_CLUSTER
1295
  REQ_BGL = False
1296

    
1297
  TCLUSTER = "cluster"
1298
  TNODE = "node"
1299
  TINSTANCE = "instance"
1300

    
1301
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1302
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1303
  ECLUSTERFILECHECK = (TCLUSTER, "ECLUSTERFILECHECK")
1304
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1305
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1306
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1307
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1308
  EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
1309
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1310
  EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
1311
  ENODEDRBD = (TNODE, "ENODEDRBD")
1312
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1313
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1314
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
1315
  ENODEHV = (TNODE, "ENODEHV")
1316
  ENODELVM = (TNODE, "ENODELVM")
1317
  ENODEN1 = (TNODE, "ENODEN1")
1318
  ENODENET = (TNODE, "ENODENET")
1319
  ENODEOS = (TNODE, "ENODEOS")
1320
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1321
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1322
  ENODERPC = (TNODE, "ENODERPC")
1323
  ENODESSH = (TNODE, "ENODESSH")
1324
  ENODEVERSION = (TNODE, "ENODEVERSION")
1325
  ENODESETUP = (TNODE, "ENODESETUP")
1326
  ENODETIME = (TNODE, "ENODETIME")
1327
  ENODEOOBPATH = (TNODE, "ENODEOOBPATH")
1328

    
1329
  ETYPE_FIELD = "code"
1330
  ETYPE_ERROR = "ERROR"
1331
  ETYPE_WARNING = "WARNING"
1332

    
1333
  _HOOKS_INDENT_RE = re.compile("^", re.M)
1334

    
1335
  class NodeImage(object):
1336
    """A class representing the logical and physical status of a node.
1337

1338
    @type name: string
1339
    @ivar name: the node name to which this object refers
1340
    @ivar volumes: a structure as returned from
1341
        L{ganeti.backend.GetVolumeList} (runtime)
1342
    @ivar instances: a list of running instances (runtime)
1343
    @ivar pinst: list of configured primary instances (config)
1344
    @ivar sinst: list of configured secondary instances (config)
1345
    @ivar sbp: dictionary of {primary-node: list of instances} for all
1346
        instances for which this node is secondary (config)
1347
    @ivar mfree: free memory, as reported by hypervisor (runtime)
1348
    @ivar dfree: free disk, as reported by the node (runtime)
1349
    @ivar offline: the offline status (config)
1350
    @type rpc_fail: boolean
1351
    @ivar rpc_fail: whether the RPC verify call was successfull (overall,
1352
        not whether the individual keys were correct) (runtime)
1353
    @type lvm_fail: boolean
1354
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1355
    @type hyp_fail: boolean
1356
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1357
    @type ghost: boolean
1358
    @ivar ghost: whether this is a known node or not (config)
1359
    @type os_fail: boolean
1360
    @ivar os_fail: whether the RPC call didn't return valid OS data
1361
    @type oslist: list
1362
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
1363
    @type vm_capable: boolean
1364
    @ivar vm_capable: whether the node can host instances
1365

1366
    """
1367
    def __init__(self, offline=False, name=None, vm_capable=True):
1368
      self.name = name
1369
      self.volumes = {}
1370
      self.instances = []
1371
      self.pinst = []
1372
      self.sinst = []
1373
      self.sbp = {}
1374
      self.mfree = 0
1375
      self.dfree = 0
1376
      self.offline = offline
1377
      self.vm_capable = vm_capable
1378
      self.rpc_fail = False
1379
      self.lvm_fail = False
1380
      self.hyp_fail = False
1381
      self.ghost = False
1382
      self.os_fail = False
1383
      self.oslist = {}
1384

    
1385
  def ExpandNames(self):
1386
    self.needed_locks = {
1387
      locking.LEVEL_NODE: locking.ALL_SET,
1388
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1389
    }
1390
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1391

    
1392
  def CheckPrereq(self):
1393
    self.all_node_info = self.cfg.GetAllNodesInfo()
1394
    self.all_inst_info = self.cfg.GetAllInstancesInfo()
1395
    self.my_node_names = utils.NiceSort(list(self.all_node_info))
1396
    self.my_node_info = self.all_node_info
1397
    self.my_inst_names = utils.NiceSort(list(self.all_inst_info))
1398
    self.my_inst_info = self.all_inst_info
1399

    
1400
  def _Error(self, ecode, item, msg, *args, **kwargs):
1401
    """Format an error message.
1402

1403
    Based on the opcode's error_codes parameter, either format a
1404
    parseable error code, or a simpler error string.
1405

1406
    This must be called only from Exec and functions called from Exec.
1407

1408
    """
1409
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1410
    itype, etxt = ecode
1411
    # first complete the msg
1412
    if args:
1413
      msg = msg % args
1414
    # then format the whole message
1415
    if self.op.error_codes:
1416
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1417
    else:
1418
      if item:
1419
        item = " " + item
1420
      else:
1421
        item = ""
1422
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1423
    # and finally report it via the feedback_fn
1424
    self._feedback_fn("  - %s" % msg)
1425

    
1426
  def _ErrorIf(self, cond, *args, **kwargs):
1427
    """Log an error message if the passed condition is True.
1428

1429
    """
1430
    cond = bool(cond) or self.op.debug_simulate_errors
1431
    if cond:
1432
      self._Error(*args, **kwargs)
1433
    # do not mark the operation as failed for WARN cases only
1434
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1435
      self.bad = self.bad or cond
1436

    
1437
  def _VerifyNode(self, ninfo, nresult):
1438
    """Perform some basic validation on data returned from a node.
1439

1440
      - check the result data structure is well formed and has all the
1441
        mandatory fields
1442
      - check ganeti version
1443

1444
    @type ninfo: L{objects.Node}
1445
    @param ninfo: the node to check
1446
    @param nresult: the results from the node
1447
    @rtype: boolean
1448
    @return: whether overall this call was successful (and we can expect
1449
         reasonable values in the respose)
1450

1451
    """
1452
    node = ninfo.name
1453
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1454

    
1455
    # main result, nresult should be a non-empty dict
1456
    test = not nresult or not isinstance(nresult, dict)
1457
    _ErrorIf(test, self.ENODERPC, node,
1458
                  "unable to verify node: no data returned")
1459
    if test:
1460
      return False
1461

    
1462
    # compares ganeti version
1463
    local_version = constants.PROTOCOL_VERSION
1464
    remote_version = nresult.get("version", None)
1465
    test = not (remote_version and
1466
                isinstance(remote_version, (list, tuple)) and
1467
                len(remote_version) == 2)
1468
    _ErrorIf(test, self.ENODERPC, node,
1469
             "connection to node returned invalid data")
1470
    if test:
1471
      return False
1472

    
1473
    test = local_version != remote_version[0]
1474
    _ErrorIf(test, self.ENODEVERSION, node,
1475
             "incompatible protocol versions: master %s,"
1476
             " node %s", local_version, remote_version[0])
1477
    if test:
1478
      return False
1479

    
1480
    # node seems compatible, we can actually try to look into its results
1481

    
1482
    # full package version
1483
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1484
                  self.ENODEVERSION, node,
1485
                  "software version mismatch: master %s, node %s",
1486
                  constants.RELEASE_VERSION, remote_version[1],
1487
                  code=self.ETYPE_WARNING)
1488

    
1489
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1490
    if ninfo.vm_capable and isinstance(hyp_result, dict):
1491
      for hv_name, hv_result in hyp_result.iteritems():
1492
        test = hv_result is not None
1493
        _ErrorIf(test, self.ENODEHV, node,
1494
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1495

    
1496
    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
1497
    if ninfo.vm_capable and isinstance(hvp_result, list):
1498
      for item, hv_name, hv_result in hvp_result:
1499
        _ErrorIf(True, self.ENODEHV, node,
1500
                 "hypervisor %s parameter verify failure (source %s): %s",
1501
                 hv_name, item, hv_result)
1502

    
1503
    test = nresult.get(constants.NV_NODESETUP,
1504
                       ["Missing NODESETUP results"])
1505
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1506
             "; ".join(test))
1507

    
1508
    return True
1509

    
1510
  def _VerifyNodeTime(self, ninfo, nresult,
1511
                      nvinfo_starttime, nvinfo_endtime):
1512
    """Check the node time.
1513

1514
    @type ninfo: L{objects.Node}
1515
    @param ninfo: the node to check
1516
    @param nresult: the remote results for the node
1517
    @param nvinfo_starttime: the start time of the RPC call
1518
    @param nvinfo_endtime: the end time of the RPC call
1519

1520
    """
1521
    node = ninfo.name
1522
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1523

    
1524
    ntime = nresult.get(constants.NV_TIME, None)
1525
    try:
1526
      ntime_merged = utils.MergeTime(ntime)
1527
    except (ValueError, TypeError):
1528
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1529
      return
1530

    
1531
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1532
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1533
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1534
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1535
    else:
1536
      ntime_diff = None
1537

    
1538
    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1539
             "Node time diverges by at least %s from master node time",
1540
             ntime_diff)
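
  # Illustrative sketch (comments only, not executed): how the skew window
  # above behaves.  utils.MergeTime() turns the (seconds, microseconds) pair
  # reported by the node into a float, and the node is flagged only when that
  # value falls outside the RPC window widened by NODE_MAX_CLOCK_SKEW on both
  # sides.  The numbers below are made up:
  #
  #   nvinfo_starttime, nvinfo_endtime = 1000.0, 1002.5
  #   ntime_merged = utils.MergeTime((1001, 250000))  # -> 1001.25
  #   skew = constants.NODE_MAX_CLOCK_SKEW
  #   ok = (nvinfo_starttime - skew) <= ntime_merged <= (nvinfo_endtime + skew)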
1541

    
1542
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1543
    """Check the node LVM results.
1544

1545
    @type ninfo: L{objects.Node}
1546
    @param ninfo: the node to check
1547
    @param nresult: the remote results for the node
1548
    @param vg_name: the configured VG name
1549

1550
    """
1551
    if vg_name is None:
1552
      return
1553

    
1554
    node = ninfo.name
1555
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1556

    
1557
    # checks vg existence and size > 20G
1558
    vglist = nresult.get(constants.NV_VGLIST, None)
1559
    test = not vglist
1560
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1561
    if not test:
1562
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1563
                                            constants.MIN_VG_SIZE)
1564
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1565

    
1566
    # check pv names
1567
    pvlist = nresult.get(constants.NV_PVLIST, None)
1568
    test = pvlist is None
1569
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1570
    if not test:
1571
      # check that ':' is not present in PV names, since it's a
1572
      # special character for lvcreate (denotes the range of PEs to
1573
      # use on the PV)
1574
      for _, pvname, owner_vg in pvlist:
1575
        test = ":" in pvname
1576
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1577
                 " '%s' of VG '%s'", pvname, owner_vg)
1578

    
1579
  def _VerifyNodeBridges(self, ninfo, nresult, bridges):
1580
    """Check the node bridges.
1581

1582
    @type ninfo: L{objects.Node}
1583
    @param ninfo: the node to check
1584
    @param nresult: the remote results for the node
1585
    @param bridges: the expected list of bridges
1586

1587
    """
1588
    if not bridges:
1589
      return
1590

    
1591
    node = ninfo.name
1592
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1593

    
1594
    missing = nresult.get(constants.NV_BRIDGES, None)
1595
    test = not isinstance(missing, list)
1596
    _ErrorIf(test, self.ENODENET, node,
1597
             "did not return valid bridge information")
1598
    if not test:
1599
      _ErrorIf(bool(missing), self.ENODENET, node, "missing bridges: %s" %
1600
               utils.CommaJoin(sorted(missing)))
1601

    
1602
  def _VerifyNodeNetwork(self, ninfo, nresult):
1603
    """Check the node network connectivity results.
1604

1605
    @type ninfo: L{objects.Node}
1606
    @param ninfo: the node to check
1607
    @param nresult: the remote results for the node
1608

1609
    """
1610
    node = ninfo.name
1611
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1612

    
1613
    test = constants.NV_NODELIST not in nresult
1614
    _ErrorIf(test, self.ENODESSH, node,
1615
             "node hasn't returned node ssh connectivity data")
1616
    if not test:
1617
      if nresult[constants.NV_NODELIST]:
1618
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1619
          _ErrorIf(True, self.ENODESSH, node,
1620
                   "ssh communication with node '%s': %s", a_node, a_msg)
1621

    
1622
    test = constants.NV_NODENETTEST not in nresult
1623
    _ErrorIf(test, self.ENODENET, node,
1624
             "node hasn't returned node tcp connectivity data")
1625
    if not test:
1626
      if nresult[constants.NV_NODENETTEST]:
1627
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1628
        for anode in nlist:
1629
          _ErrorIf(True, self.ENODENET, node,
1630
                   "tcp communication with node '%s': %s",
1631
                   anode, nresult[constants.NV_NODENETTEST][anode])
1632

    
1633
    test = constants.NV_MASTERIP not in nresult
1634
    _ErrorIf(test, self.ENODENET, node,
1635
             "node hasn't returned node master IP reachability data")
1636
    if not test:
1637
      if not nresult[constants.NV_MASTERIP]:
1638
        if node == self.master_node:
1639
          msg = "the master node cannot reach the master IP (not configured?)"
1640
        else:
1641
          msg = "cannot reach the master IP"
1642
        _ErrorIf(True, self.ENODENET, node, msg)
1643

    
1644
  def _VerifyInstance(self, instance, instanceconfig, node_image,
1645
                      diskstatus):
1646
    """Verify an instance.
1647

1648
    This function checks to see if the required block devices are
1649
    available on the instance's node.
1650

1651
    """
1652
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1653
    node_current = instanceconfig.primary_node
1654

    
1655
    node_vol_should = {}
1656
    instanceconfig.MapLVsByNode(node_vol_should)
1657

    
1658
    for node in node_vol_should:
1659
      n_img = node_image[node]
1660
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1661
        # ignore missing volumes on offline or broken nodes
1662
        continue
1663
      for volume in node_vol_should[node]:
1664
        test = volume not in n_img.volumes
1665
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1666
                 "volume %s missing on node %s", volume, node)
1667

    
1668
    if instanceconfig.admin_up:
1669
      pri_img = node_image[node_current]
1670
      test = instance not in pri_img.instances and not pri_img.offline
1671
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1672
               "instance not running on its primary node %s",
1673
               node_current)
1674

    
1675
    for node, n_img in node_image.items():
1676
      if node != node_current:
1677
        test = instance in n_img.instances
1678
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1679
                 "instance should not run on node %s", node)
1680

    
1681
    diskdata = [(nname, success, status, idx)
1682
                for (nname, disks) in diskstatus.items()
1683
                for idx, (success, status) in enumerate(disks)]
1684

    
1685
    for nname, success, bdev_status, idx in diskdata:
1686
      # the 'ghost node' construction in Exec() ensures that we have a
1687
      # node here
1688
      snode = node_image[nname]
1689
      bad_snode = snode.ghost or snode.offline
1690
      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
1691
               self.EINSTANCEFAULTYDISK, instance,
1692
               "couldn't retrieve status for disk/%s on %s: %s",
1693
               idx, nname, bdev_status)
1694
      _ErrorIf((instanceconfig.admin_up and success and
1695
                bdev_status.ldisk_status == constants.LDS_FAULTY),
1696
               self.EINSTANCEFAULTYDISK, instance,
1697
               "disk/%s on %s is faulty", idx, nname)
1698

    
1699
  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
1700
    """Verify if there are any unknown volumes in the cluster.
1701

1702
    The .os, .swap and backup volumes are ignored. All other volumes are
1703
    reported as unknown.
1704

1705
    @type reserved: L{ganeti.utils.FieldSet}
1706
    @param reserved: a FieldSet of reserved volume names
1707

1708
    """
1709
    for node, n_img in node_image.items():
1710
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1711
        # skip non-healthy nodes
1712
        continue
1713
      for volume in n_img.volumes:
1714
        test = ((node not in node_vol_should or
1715
                volume not in node_vol_should[node]) and
1716
                not reserved.Matches(volume))
1717
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1718
                      "volume %s is unknown", volume)
1719

    
1720
  def _VerifyOrphanInstances(self, instancelist, node_image):
1721
    """Verify the list of running instances.
1722

1723
    This checks what instances are running but unknown to the cluster.
1724

1725
    """
1726
    for node, n_img in node_image.items():
1727
      for o_inst in n_img.instances:
1728
        test = o_inst not in instancelist
1729
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1730
                      "instance %s on node %s should not exist", o_inst, node)
1731

    
1732
  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1733
    """Verify N+1 Memory Resilience.
1734

1735
    Check that if one single node dies we can still start all the
1736
    instances it was primary for.
1737

1738
    """
1739
    cluster_info = self.cfg.GetClusterInfo()
1740
    for node, n_img in node_image.items():
1741
      # This code checks that every node which is now listed as
1742
      # secondary has enough memory to host all instances it is
1743
      # supposed to should a single other node in the cluster fail.
1744
      # FIXME: not ready for failover to an arbitrary node
1745
      # FIXME: does not support file-backed instances
1746
      # WARNING: we currently take into account down instances as well
1747
      # as up ones, considering that even if they're down someone
1748
      # might want to start them even in the event of a node failure.
1749
      if n_img.offline:
1750
        # we're skipping offline nodes from the N+1 warning, since
1751
        # most likely we don't have good memory information from them;
1752
        # we already list instances living on such nodes, and that's
1753
        # enough warning
1754
        continue
1755
      for prinode, instances in n_img.sbp.items():
1756
        needed_mem = 0
1757
        for instance in instances:
1758
          bep = cluster_info.FillBE(instance_cfg[instance])
1759
          if bep[constants.BE_AUTO_BALANCE]:
1760
            needed_mem += bep[constants.BE_MEMORY]
1761
        test = n_img.mfree < needed_mem
1762
        self._ErrorIf(test, self.ENODEN1, node,
                      "not enough memory to accommodate instance failovers"
1764
                      " should node %s fail (%dMiB needed, %dMiB available)",
1765
                      prinode, needed_mem, n_img.mfree)
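
  # Worked example (illustration only): if this node is secondary for two
  # instances whose primary is node A, with BE_MEMORY of 1024 and 512 MiB and
  # auto_balance enabled on both, then needed_mem for the (A -> this node)
  # pair is 1536 MiB, and ENODEN1 is raised when the node's reported mfree
  # drops below that.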
1766

    
1767
  @classmethod
1768
  def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
1769
                   (files_all, files_all_opt, files_mc, files_vm)):
1770
    """Verifies file checksums collected from all nodes.
1771

1772
    @param errorif: Callback for reporting errors
1773
    @param nodeinfo: List of L{objects.Node} objects
1774
    @param master_node: Name of master node
1775
    @param all_nvinfo: RPC results
1776

1777
    """
1778
    node_names = frozenset(node.name for node in nodeinfo)
1779

    
1780
    assert master_node in node_names
1781
    assert (len(files_all | files_all_opt | files_mc | files_vm) ==
1782
            sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
1783
           "Found file listed in more than one file list"
1784

    
1785
    # Define functions determining which nodes to consider for a file
1786
    file2nodefn = dict([(filename, fn)
1787
      for (files, fn) in [(files_all, None),
1788
                          (files_all_opt, None),
1789
                          (files_mc, lambda node: (node.master_candidate or
1790
                                                   node.name == master_node)),
1791
                          (files_vm, lambda node: node.vm_capable)]
1792
      for filename in files])
1793

    
1794
    fileinfo = dict((filename, {}) for filename in file2nodefn.keys())
1795

    
1796
    for node in nodeinfo:
1797
      nresult = all_nvinfo[node.name]
1798

    
1799
      if nresult.fail_msg or not nresult.payload:
1800
        node_files = None
1801
      else:
1802
        node_files = nresult.payload.get(constants.NV_FILELIST, None)
1803

    
1804
      test = not (node_files and isinstance(node_files, dict))
1805
      errorif(test, cls.ENODEFILECHECK, node.name,
1806
              "Node did not return file checksum data")
1807
      if test:
1808
        continue
1809

    
1810
      for (filename, checksum) in node_files.items():
1811
        # Check if the file should be considered for a node
1812
        fn = file2nodefn[filename]
1813
        if fn is None or fn(node):
1814
          fileinfo[filename].setdefault(checksum, set()).add(node.name)
1815

    
1816
    for (filename, checksums) in fileinfo.items():
1817
      assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
1818

    
1819
      # Nodes having the file
1820
      with_file = frozenset(node_name
1821
                            for nodes in fileinfo[filename].values()
1822
                            for node_name in nodes)
1823

    
1824
      # Nodes missing file
1825
      missing_file = node_names - with_file
1826

    
1827
      if filename in files_all_opt:
1828
        # All or no nodes
1829
        errorif(missing_file and missing_file != node_names,
1830
                cls.ECLUSTERFILECHECK, None,
1831
                "File %s is optional, but it must exist on all or no nodes (not"
1832
                " found on %s)",
1833
                filename, utils.CommaJoin(utils.NiceSort(missing_file)))
1834
      else:
1835
        errorif(missing_file, cls.ECLUSTERFILECHECK, None,
1836
                "File %s is missing from node(s) %s", filename,
1837
                utils.CommaJoin(utils.NiceSort(missing_file)))
1838

    
1839
      # See if there are multiple versions of the file
1840
      test = len(checksums) > 1
1841
      if test:
1842
        variants = ["variant %s on %s" %
1843
                    (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
1844
                    for (idx, (checksum, nodes)) in
1845
                      enumerate(sorted(checksums.items()))]
1846
      else:
1847
        variants = []
1848

    
1849
      errorif(test, cls.ECLUSTERFILECHECK, None,
1850
              "File %s found with %s different checksums (%s)",
1851
              filename, len(checksums), "; ".join(variants))
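
  # Illustrative shape of the structure checked above (values are made up):
  #
  #   fileinfo = {
  #     "/var/lib/ganeti/ssconf_cluster_name": {
  #       "1a2b3c...": set(["node1", "node2"]),
  #       "4d5e6f...": set(["node3"]),
  #     },
  #   }
  #
  # Two checksum keys under a single filename is exactly what triggers the
  # "found with N different checksums" error, and node names absent from all
  # sets end up in missing_file.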
1852

    
1853
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
1854
                      drbd_map):
    """Verifies the node DRBD status.
1856

1857
    @type ninfo: L{objects.Node}
1858
    @param ninfo: the node to check
1859
    @param nresult: the remote results for the node
1860
    @param instanceinfo: the dict of instances
1861
    @param drbd_helper: the configured DRBD usermode helper
1862
    @param drbd_map: the DRBD map as returned by
1863
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1864

1865
    """
1866
    node = ninfo.name
1867
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1868

    
1869
    if drbd_helper:
1870
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
1871
      test = (helper_result is None)
1872
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
1873
               "no drbd usermode helper returned")
1874
      if helper_result:
1875
        status, payload = helper_result
1876
        test = not status
1877
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
1878
                 "drbd usermode helper check unsuccessful: %s", payload)
1879
        test = status and (payload != drbd_helper)
1880
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
1881
                 "wrong drbd usermode helper: %s", payload)
1882

    
1883
    # compute the DRBD minors
1884
    node_drbd = {}
1885
    for minor, instance in drbd_map[node].items():
1886
      test = instance not in instanceinfo
1887
      _ErrorIf(test, self.ECLUSTERCFG, None,
1888
               "ghost instance '%s' in temporary DRBD map", instance)
1889
        # ghost instance should not be running, but otherwise we
1890
        # don't give double warnings (both ghost instance and
1891
        # unallocated minor in use)
1892
      if test:
1893
        node_drbd[minor] = (instance, False)
1894
      else:
1895
        instance = instanceinfo[instance]
1896
        node_drbd[minor] = (instance.name, instance.admin_up)
1897

    
1898
    # and now check them
1899
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
1900
    test = not isinstance(used_minors, (tuple, list))
1901
    _ErrorIf(test, self.ENODEDRBD, node,
1902
             "cannot parse drbd status file: %s", str(used_minors))
1903
    if test:
1904
      # we cannot check drbd status
1905
      return
1906

    
1907
    for minor, (iname, must_exist) in node_drbd.items():
1908
      test = minor not in used_minors and must_exist
1909
      _ErrorIf(test, self.ENODEDRBD, node,
1910
               "drbd minor %d of instance %s is not active", minor, iname)
1911
    for minor in used_minors:
1912
      test = minor not in node_drbd
1913
      _ErrorIf(test, self.ENODEDRBD, node,
1914
               "unallocated drbd minor %d is in use", minor)
1915

    
1916
  def _UpdateNodeOS(self, ninfo, nresult, nimg):
1917
    """Builds the node OS structures.
1918

1919
    @type ninfo: L{objects.Node}
1920
    @param ninfo: the node to check
1921
    @param nresult: the remote results for the node
1922
    @param nimg: the node image object
1923

1924
    """
1925
    node = ninfo.name
1926
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1927

    
1928
    remote_os = nresult.get(constants.NV_OSLIST, None)
1929
    test = (not isinstance(remote_os, list) or
1930
            not compat.all(isinstance(v, list) and len(v) == 7
1931
                           for v in remote_os))
1932

    
1933
    _ErrorIf(test, self.ENODEOS, node,
1934
             "node hasn't returned valid OS data")
1935

    
1936
    nimg.os_fail = test
1937

    
1938
    if test:
1939
      return
1940

    
1941
    os_dict = {}
1942

    
1943
    for (name, os_path, status, diagnose,
1944
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:
1945

    
1946
      if name not in os_dict:
1947
        os_dict[name] = []
1948

    
1949
      # parameters is a list of lists instead of list of tuples due to
1950
      # JSON lacking a real tuple type, fix it:
1951
      parameters = [tuple(v) for v in parameters]
1952
      os_dict[name].append((os_path, status, diagnose,
1953
                            set(variants), set(parameters), set(api_ver)))
1954

    
1955
    nimg.oslist = os_dict
1956

    
1957
  def _VerifyNodeOS(self, ninfo, nimg, base):
1958
    """Verifies the node OS list.
1959

1960
    @type ninfo: L{objects.Node}
1961
    @param ninfo: the node to check
1962
    @param nimg: the node image object
1963
    @param base: the 'template' node we match against (e.g. from the master)
1964

1965
    """
1966
    node = ninfo.name
1967
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1968

    
1969
    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
1970

    
1971
    beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
1972
    for os_name, os_data in nimg.oslist.items():
1973
      assert os_data, "Empty OS status for OS %s?!" % os_name
1974
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
1975
      _ErrorIf(not f_status, self.ENODEOS, node,
1976
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
1977
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
1978
               "OS '%s' has multiple entries (first one shadows the rest): %s",
1979
               os_name, utils.CommaJoin([v[0] for v in os_data]))
1980
      # this will be caught in the backend too
1981
      _ErrorIf(compat.any(v >= constants.OS_API_V15 for v in f_api)
1982
               and not f_var, self.ENODEOS, node,
1983
               "OS %s with API at least %d does not declare any variant",
1984
               os_name, constants.OS_API_V15)
1985
      # comparisons with the 'base' image
1986
      test = os_name not in base.oslist
1987
      _ErrorIf(test, self.ENODEOS, node,
1988
               "Extra OS %s not present on reference node (%s)",
1989
               os_name, base.name)
1990
      if test:
1991
        continue
1992
      assert base.oslist[os_name], "Base node has empty OS status?"
1993
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
1994
      if not b_status:
1995
        # base OS is invalid, skipping
1996
        continue
1997
      for kind, a, b in [("API version", f_api, b_api),
1998
                         ("variants list", f_var, b_var),
1999
                         ("parameters", beautify_params(f_param),
2000
                          beautify_params(b_param))]:
2001
        _ErrorIf(a != b, self.ENODEOS, node,
2002
                 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
2003
                 kind, os_name, base.name,
2004
                 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
2005

    
2006
    # check any missing OSes
2007
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
2008
    _ErrorIf(missing, self.ENODEOS, node,
2009
             "OSes present on reference node %s but missing on this node: %s",
2010
             base.name, utils.CommaJoin(missing))
2011

    
2012
  def _VerifyOob(self, ninfo, nresult):
2013
    """Verifies out of band functionality of a node.
2014

2015
    @type ninfo: L{objects.Node}
2016
    @param ninfo: the node to check
2017
    @param nresult: the remote results for the node
2018

2019
    """
2020
    node = ninfo.name
2021
    # We just have to verify the paths on master and/or master candidates
2022
    # as the oob helper is invoked on the master
2023
    if ((ninfo.master_candidate or ninfo.master_capable) and
2024
        constants.NV_OOB_PATHS in nresult):
2025
      for path_result in nresult[constants.NV_OOB_PATHS]:
2026
        self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)
2027

    
2028
  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
2029
    """Verifies and updates the node volume data.
2030

2031
    This function will update a L{NodeImage}'s internal structures
2032
    with data from the remote call.
2033

2034
    @type ninfo: L{objects.Node}
2035
    @param ninfo: the node to check
2036
    @param nresult: the remote results for the node
2037
    @param nimg: the node image object
2038
    @param vg_name: the configured VG name
2039

2040
    """
2041
    node = ninfo.name
2042
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2043

    
2044
    nimg.lvm_fail = True
2045
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
2046
    if vg_name is None:
2047
      pass
2048
    elif isinstance(lvdata, basestring):
2049
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
2050
               utils.SafeEncode(lvdata))
2051
    elif not isinstance(lvdata, dict):
2052
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
2053
    else:
2054
      nimg.volumes = lvdata
2055
      nimg.lvm_fail = False
2056

    
2057
  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
2058
    """Verifies and updates the node instance list.
2059

2060
    If the listing was successful, then updates this node's instance
2061
    list. Otherwise, it marks the RPC call as failed for the instance
2062
    list key.
2063

2064
    @type ninfo: L{objects.Node}
2065
    @param ninfo: the node to check
2066
    @param nresult: the remote results for the node
2067
    @param nimg: the node image object
2068

2069
    """
2070
    idata = nresult.get(constants.NV_INSTANCELIST, None)
2071
    test = not isinstance(idata, list)
2072
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
2073
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
2074
    if test:
2075
      nimg.hyp_fail = True
2076
    else:
2077
      nimg.instances = idata
2078

    
2079
  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
2080
    """Verifies and computes a node information map
2081

2082
    @type ninfo: L{objects.Node}
2083
    @param ninfo: the node to check
2084
    @param nresult: the remote results for the node
2085
    @param nimg: the node image object
2086
    @param vg_name: the configured VG name
2087

2088
    """
2089
    node = ninfo.name
2090
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2091

    
2092
    # try to read free memory (from the hypervisor)
2093
    hv_info = nresult.get(constants.NV_HVINFO, None)
2094
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
2095
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
2096
    if not test:
2097
      try:
2098
        nimg.mfree = int(hv_info["memory_free"])
2099
      except (ValueError, TypeError):
2100
        _ErrorIf(True, self.ENODERPC, node,
2101
                 "node returned invalid nodeinfo, check hypervisor")
2102

    
2103
    # FIXME: devise a free space model for file based instances as well
2104
    if vg_name is not None:
2105
      test = (constants.NV_VGLIST not in nresult or
2106
              vg_name not in nresult[constants.NV_VGLIST])
2107
      _ErrorIf(test, self.ENODELVM, node,
2108
               "node didn't return data for the volume group '%s'"
2109
               " - it is either missing or broken", vg_name)
2110
      if not test:
2111
        try:
2112
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
2113
        except (ValueError, TypeError):
2114
          _ErrorIf(True, self.ENODERPC, node,
2115
                   "node returned invalid LVM info, check LVM status")
2116

    
2117
  def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
2118
    """Gets per-disk status information for all instances.
2119

2120
    @type nodelist: list of strings
2121
    @param nodelist: Node names
2122
    @type node_image: dict of (name, L{objects.Node})
2123
    @param node_image: Node objects
2124
    @type instanceinfo: dict of (name, L{objects.Instance})
2125
    @param instanceinfo: Instance objects
2126
    @rtype: {instance: {node: [(success, payload)]}}
2127
    @return: a dictionary of per-instance dictionaries with nodes as
2128
        keys and disk information as values; the disk information is a
2129
        list of tuples (success, payload)
2130

2131
    """
2132
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2133

    
2134
    node_disks = {}
2135
    node_disks_devonly = {}
2136
    diskless_instances = set()
2137
    diskless = constants.DT_DISKLESS
2138

    
2139
    for nname in nodelist:
2140
      node_instances = list(itertools.chain(node_image[nname].pinst,
2141
                                            node_image[nname].sinst))
2142
      diskless_instances.update(inst for inst in node_instances
2143
                                if instanceinfo[inst].disk_template == diskless)
2144
      disks = [(inst, disk)
2145
               for inst in node_instances
2146
               for disk in instanceinfo[inst].disks]
2147

    
2148
      if not disks:
2149
        # No need to collect data
2150
        continue
2151

    
2152
      node_disks[nname] = disks
2153

    
2154
      # Creating copies as SetDiskID below will modify the objects and that can
2155
      # lead to incorrect data returned from nodes
2156
      devonly = [dev.Copy() for (_, dev) in disks]
2157

    
2158
      for dev in devonly:
2159
        self.cfg.SetDiskID(dev, nname)
2160

    
2161
      node_disks_devonly[nname] = devonly
2162

    
2163
    assert len(node_disks) == len(node_disks_devonly)
2164

    
2165
    # Collect data from all nodes with disks
2166
    result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
2167
                                                          node_disks_devonly)
2168

    
2169
    assert len(result) == len(node_disks)
2170

    
2171
    instdisk = {}
2172

    
2173
    for (nname, nres) in result.items():
2174
      disks = node_disks[nname]
2175

    
2176
      if nres.offline:
2177
        # No data from this node
2178
        data = len(disks) * [(False, "node offline")]
2179
      else:
2180
        msg = nres.fail_msg
2181
        _ErrorIf(msg, self.ENODERPC, nname,
2182
                 "while getting disk information: %s", msg)
2183
        if msg:
2184
          # No data from this node
2185
          data = len(disks) * [(False, msg)]
2186
        else:
2187
          data = []
2188
          for idx, i in enumerate(nres.payload):
2189
            if isinstance(i, (tuple, list)) and len(i) == 2:
2190
              data.append(i)
2191
            else:
2192
              logging.warning("Invalid result from node %s, entry %d: %s",
2193
                              nname, idx, i)
2194
              data.append((False, "Invalid result from the remote node"))
2195

    
2196
      for ((inst, _), status) in zip(disks, data):
2197
        instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)
2198

    
2199
    # Add empty entries for diskless instances.
2200
    for inst in diskless_instances:
2201
      assert inst not in instdisk
2202
      instdisk[inst] = {}
2203

    
2204
    assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
2205
                      len(nnames) <= len(instanceinfo[inst].all_nodes) and
2206
                      compat.all(isinstance(s, (tuple, list)) and
2207
                                 len(s) == 2 for s in statuses)
2208
                      for inst, nnames in instdisk.items()
2209
                      for nname, statuses in nnames.items())
2210
    assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
2211

    
2212
    return instdisk
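
  # Shape of the returned mapping (instance and node names are illustrative):
  #
  #   instdisk = {
  #     "inst1.example.com": {
  #       "node1.example.com": [(True, st0), (True, st1)],
  #       "node2.example.com": [(False, "node offline")] * 2,
  #     },
  #     "diskless1.example.com": {},
  #   }
  #
  # i.e. one (success, payload) pair per disk, per node used by the instance.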
2213

    
2214
  def _VerifyHVP(self, hvp_data):
2215
    """Verifies locally the syntax of the hypervisor parameters.
2216

2217
    """
2218
    for item, hv_name, hv_params in hvp_data:
2219
      msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
2220
             (hv_name, item))
2221
      try:
2222
        hv_class = hypervisor.GetHypervisor(hv_name)
2223
        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2224
        hv_class.CheckParameterSyntax(hv_params)
2225
      except errors.GenericError, err:
2226
        self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))
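
  # Each hvp_data entry processed above is a (source, hypervisor, params)
  # triple built by Exec() below, for example (values are illustrative only):
  #
  #   ("os debian-image", constants.HT_XEN_PVM, {"kernel_path": "/boot/vmlinuz"})
  #
  # The check is purely local: it only validates parameter types and syntax
  # and does not contact any node.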
2227

    
2228
  def BuildHooksEnv(self):
2229
    """Build hooks env.
2230

2231
    Cluster-Verify hooks just ran in the post phase and their failure makes
2232
    the output be logged in the verify output and the verification to fail.
2233

2234
    """
2235
    env = {
2236
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
2237
      }
2238

    
2239
    env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
2240
               for node in self.my_node_info.values())
2241

    
2242
    return env
2243

    
2244
  def BuildHooksNodes(self):
2245
    """Build hooks nodes.
2246

2247
    """
2248
    assert self.my_node_names, ("Node list not gathered,"
2249
      " has CheckPrereq been executed?")
2250
    return ([], self.my_node_names)
2251

    
2252
  def Exec(self, feedback_fn):
2253
    """Verify integrity of cluster, performing various test on nodes.
2254

2255
    """
2256
    # This method has too many local variables. pylint: disable-msg=R0914
2257
    self.bad = False
2258
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
2259
    verbose = self.op.verbose
2260
    self._feedback_fn = feedback_fn
2261
    feedback_fn("* Verifying global settings")
2262
    for msg in self.cfg.VerifyConfig():
2263
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
2264

    
2265
    # Check the cluster certificates
2266
    for cert_filename in constants.ALL_CERT_FILES:
2267
      (errcode, msg) = _VerifyCertificate(cert_filename)
2268
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
2269

    
2270
    vg_name = self.cfg.GetVGName()
2271
    drbd_helper = self.cfg.GetDRBDHelper()
2272
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2273
    cluster = self.cfg.GetClusterInfo()
2274
    groupinfo = self.cfg.GetAllNodeGroupsInfo()
2275
    node_data_list = [self.my_node_info[name] for name in self.my_node_names]
2276

    
2277
    i_non_redundant = [] # Non redundant instances
2278
    i_non_a_balanced = [] # Non auto-balanced instances
2279
    n_offline = 0 # Count of offline nodes
2280
    n_drained = 0 # Count of nodes being drained
2281
    node_vol_should = {}
2282

    
2283
    # FIXME: verify OS list
2284

    
2285
    # File verification
2286
    filemap = _ComputeAncillaryFiles(cluster, False)
2287

    
2288
    # do local checksums
2289
    master_node = self.master_node = self.cfg.GetMasterNode()
2290
    master_ip = self.cfg.GetMasterIP()
2291

    
2292
    # Compute the set of hypervisor parameters
2293
    hvp_data = []
2294
    for hv_name in hypervisors:
2295
      hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
2296
    for os_name, os_hvp in cluster.os_hvp.items():
2297
      for hv_name, hv_params in os_hvp.items():
2298
        if not hv_params:
2299
          continue
2300
        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
2301
        hvp_data.append(("os %s" % os_name, hv_name, full_params))
2302
    # TODO: collapse identical parameter values in a single one
2303
    for instance in self.all_inst_info.values():
2304
      if not instance.hvparams:
2305
        continue
2306
      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
2307
                       cluster.FillHV(instance)))
2308
    # and verify them locally
2309
    self._VerifyHVP(hvp_data)
2310

    
2311
    feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))
2312
    node_verify_param = {
2313
      constants.NV_FILELIST:
2314
        utils.UniqueSequence(filename
2315
                             for files in filemap
2316
                             for filename in files),
2317
      constants.NV_NODELIST: [node.name for node in self.all_node_info.values()
2318
                              if not node.offline],
2319
      constants.NV_HYPERVISOR: hypervisors,
2320
      constants.NV_HVPARAMS: hvp_data,
2321
      constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip)
2322
                                 for node in node_data_list
2323
                                 if not node.offline],
2324
      constants.NV_INSTANCELIST: hypervisors,
2325
      constants.NV_VERSION: None,
2326
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
2327
      constants.NV_NODESETUP: None,
2328
      constants.NV_TIME: None,
2329
      constants.NV_MASTERIP: (master_node, master_ip),
2330
      constants.NV_OSLIST: None,
2331
      constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
2332
      }
2333

    
2334
    if vg_name is not None:
2335
      node_verify_param[constants.NV_VGLIST] = None
2336
      node_verify_param[constants.NV_LVLIST] = vg_name
2337
      node_verify_param[constants.NV_PVLIST] = [vg_name]
2338
      node_verify_param[constants.NV_DRBDLIST] = None
2339

    
2340
    if drbd_helper:
2341
      node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
2342

    
2343
    # bridge checks
2344
    # FIXME: this needs to be changed per node-group, not cluster-wide
2345
    bridges = set()
2346
    default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
2347
    if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2348
      bridges.add(default_nicpp[constants.NIC_LINK])
2349
    for instance in self.my_inst_info.values():
2350
      for nic in instance.nics:
2351
        full_nic = cluster.SimpleFillNIC(nic.nicparams)
2352
        if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2353
          bridges.add(full_nic[constants.NIC_LINK])
2354

    
2355
    if bridges:
2356
      node_verify_param[constants.NV_BRIDGES] = list(bridges)
2357

    
2358
    # Build our expected cluster state
2359
    node_image = dict((node.name, self.NodeImage(offline=node.offline,
2360
                                                 name=node.name,
2361
                                                 vm_capable=node.vm_capable))
2362
                      for node in node_data_list)
2363

    
2364
    # Gather OOB paths
2365
    oob_paths = []
2366
    for node in self.all_node_info.values():
2367
      path = _SupportsOob(self.cfg, node)
2368
      if path and path not in oob_paths:
2369
        oob_paths.append(path)
2370

    
2371
    if oob_paths:
2372
      node_verify_param[constants.NV_OOB_PATHS] = oob_paths
2373

    
2374
    for instance in self.my_inst_names:
2375
      inst_config = self.my_inst_info[instance]
2376

    
2377
      for nname in inst_config.all_nodes:
2378
        if nname not in node_image:
2379
          # ghost node
2380
          gnode = self.NodeImage(name=nname)
2381
          gnode.ghost = True
2382
          node_image[nname] = gnode
2383

    
2384
      inst_config.MapLVsByNode(node_vol_should)
2385

    
2386
      pnode = inst_config.primary_node
2387
      node_image[pnode].pinst.append(instance)
2388

    
2389
      for snode in inst_config.secondary_nodes:
2390
        nimg = node_image[snode]
2391
        nimg.sinst.append(instance)
2392
        if pnode not in nimg.sbp:
2393
          nimg.sbp[pnode] = []
2394
        nimg.sbp[pnode].append(instance)
2395

    
2396
    # At this point, we have the in-memory data structures complete,
2397
    # except for the runtime information, which we'll gather next
2398

    
2399
    # Due to the way our RPC system works, exact response times cannot be
2400
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2401
    # time before and after executing the request, we can at least have a time
2402
    # window.
2403
    nvinfo_starttime = time.time()
2404
    all_nvinfo = self.rpc.call_node_verify(self.my_node_names,
2405
                                           node_verify_param,
2406
                                           self.cfg.GetClusterName())
2407
    nvinfo_endtime = time.time()
2408

    
2409
    all_drbd_map = self.cfg.ComputeDRBDMap()
2410

    
2411
    feedback_fn("* Gathering disk information (%s nodes)" %
2412
                len(self.my_node_names))
2413
    instdisk = self._CollectDiskInfo(self.my_node_names, node_image,
2414
                                     self.my_inst_info)
2415

    
2416
    feedback_fn("* Verifying configuration file consistency")
2417

    
2418
    if master_node not in self.my_node_info:
2419
      # _VerifyFiles requires that master_node is present in the passed node
2420
      # info, to use it as a point of reference even if we're verifying only a
2421
      # subset of nodes. Make it so.
2422
      vf_nvinfo = all_nvinfo.copy()
2423
      vf_node_info = (self.my_node_info.values() +
2424
                      [self.all_node_info[master_node]])
2425

    
2426
      key = constants.NV_FILELIST
2427
      vf_nvinfo.update(self.rpc.call_node_verify([master_node],
2428
                                                 {key: node_verify_param[key]},
2429
                                                 self.cfg.GetClusterName()))
2430
    else:
2431
      vf_nvinfo = all_nvinfo
2432
      vf_node_info = self.my_node_info.values()
2433

    
2434
    self._VerifyFiles(_ErrorIf, vf_node_info, master_node, vf_nvinfo, filemap)
2435

    
2436
    feedback_fn("* Verifying node status")
2437

    
2438
    refos_img = None
2439

    
2440
    for node_i in node_data_list:
2441
      node = node_i.name
2442
      nimg = node_image[node]
2443

    
2444
      if node_i.offline:
2445
        if verbose:
2446
          feedback_fn("* Skipping offline node %s" % (node,))
2447
        n_offline += 1
2448
        continue
2449

    
2450
      if node == master_node:
2451
        ntype = "master"
2452
      elif node_i.master_candidate:
2453
        ntype = "master candidate"
2454
      elif node_i.drained:
2455
        ntype = "drained"
2456
        n_drained += 1
2457
      else:
2458
        ntype = "regular"
2459
      if verbose:
2460
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2461

    
2462
      msg = all_nvinfo[node].fail_msg
2463
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2464
      if msg:
2465
        nimg.rpc_fail = True
2466
        continue
2467

    
2468
      nresult = all_nvinfo[node].payload
2469

    
2470
      nimg.call_ok = self._VerifyNode(node_i, nresult)
2471
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2472
      self._VerifyNodeNetwork(node_i, nresult)
2473
      self._VerifyOob(node_i, nresult)
2474

    
2475
      if nimg.vm_capable:
2476
        self._VerifyNodeLVM(node_i, nresult, vg_name)
2477
        self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
2478
                             all_drbd_map)
2479

    
2480
        self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2481
        self._UpdateNodeInstances(node_i, nresult, nimg)
2482
        self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2483
        self._UpdateNodeOS(node_i, nresult, nimg)
2484
        if not nimg.os_fail:
2485
          if refos_img is None:
2486
            refos_img = nimg
2487
          self._VerifyNodeOS(node_i, nimg, refos_img)
2488
        self._VerifyNodeBridges(node_i, nresult, bridges)
2489

    
2490
    feedback_fn("* Verifying instance status")
2491
    for instance in self.my_inst_names:
2492
      if verbose:
2493
        feedback_fn("* Verifying instance %s" % instance)
2494
      inst_config = self.my_inst_info[instance]
2495
      self._VerifyInstance(instance, inst_config, node_image,
2496
                           instdisk[instance])
2497
      inst_nodes_offline = []
2498

    
2499
      pnode = inst_config.primary_node
2500
      pnode_img = node_image[pnode]
2501
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
2502
               self.ENODERPC, pnode, "instance %s, connection to"
2503
               " primary node failed", instance)
2504

    
2505
      _ErrorIf(inst_config.admin_up and pnode_img.offline,
2506
               self.EINSTANCEBADNODE, instance,
2507
               "instance is marked as running and lives on offline node %s",
2508
               inst_config.primary_node)
2509

    
2510
      # If the instance is non-redundant we cannot survive losing its primary
2511
      # node, so we are not N+1 compliant. On the other hand we have no disk
2512
      # templates with more than one secondary so that situation is not well
2513
      # supported either.
2514
      # FIXME: does not support file-backed instances
2515
      if not inst_config.secondary_nodes:
2516
        i_non_redundant.append(instance)
2517

    
2518
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
2519
               instance, "instance has multiple secondary nodes: %s",
2520
               utils.CommaJoin(inst_config.secondary_nodes),
2521
               code=self.ETYPE_WARNING)
2522

    
2523
      if inst_config.disk_template in constants.DTS_INT_MIRROR:
2524
        pnode = inst_config.primary_node
2525
        instance_nodes = utils.NiceSort(inst_config.all_nodes)
2526
        instance_groups = {}
2527

    
2528
        for node in instance_nodes:
2529
          instance_groups.setdefault(self.all_node_info[node].group,
2530
                                     []).append(node)
2531

    
2532
        pretty_list = [
2533
          "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
2534
          # Sort so that we always list the primary node first.
2535
          for group, nodes in sorted(instance_groups.items(),
2536
                                     key=lambda (_, nodes): pnode in nodes,
2537
                                     reverse=True)]
2538

    
2539
        self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
2540
                      instance, "instance has primary and secondary nodes in"
2541
                      " different groups: %s", utils.CommaJoin(pretty_list),
2542
                      code=self.ETYPE_WARNING)
2543

    
2544
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
2545
        i_non_a_balanced.append(instance)
2546

    
2547
      for snode in inst_config.secondary_nodes:
2548
        s_img = node_image[snode]
2549
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
2550
                 "instance %s, connection to secondary node failed", instance)
2551

    
2552
        if s_img.offline:
2553
          inst_nodes_offline.append(snode)
2554

    
2555
      # warn that the instance lives on offline nodes
2556
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
2557
               "instance has offline secondary node(s) %s",
2558
               utils.CommaJoin(inst_nodes_offline))
2559
      # ... or ghost/non-vm_capable nodes
2560
      for node in inst_config.all_nodes:
2561
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
2562
                 "instance lives on ghost node %s", node)
2563
        _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
2564
                 instance, "instance lives on non-vm_capable node %s", node)
2565

    
2566
    feedback_fn("* Verifying orphan volumes")
2567
    reserved = utils.FieldSet(*cluster.reserved_lvs)
2568
    self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)
2569

    
2570
    feedback_fn("* Verifying orphan instances")
2571
    self._VerifyOrphanInstances(set(self.all_inst_info.keys()), node_image)
2572

    
2573
    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
2574
      feedback_fn("* Verifying N+1 Memory redundancy")
2575
      self._VerifyNPlusOneMemory(node_image, self.my_inst_info)
2576

    
2577
    feedback_fn("* Other Notes")
2578
    if i_non_redundant:
2579
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
2580
                  % len(i_non_redundant))
2581

    
2582
    if i_non_a_balanced:
2583
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
2584
                  % len(i_non_a_balanced))
2585

    
2586
    if n_offline:
2587
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
2588

    
2589
    if n_drained:
2590
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
2591

    
2592
    return not self.bad
2593

    
2594
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
2595
    """Analyze the post-hooks' result
2596

2597
    This method analyses the hook result, handles it, and sends some
2598
    nicely-formatted feedback back to the user.
2599

2600
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
2601
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
2602
    @param hooks_results: the results of the multi-node hooks rpc call
2603
    @param feedback_fn: function used to send feedback back to the caller
2604
    @param lu_result: previous Exec result
2605
    @return: the new Exec result, based on the previous result
2606
        and hook results
2607

2608
    """
2609
    # We only really run POST phase hooks, and are only interested in
2610
    # their results
2611
    if phase == constants.HOOKS_PHASE_POST:
2612
      # Used to change hooks' output to proper indentation
2613
      feedback_fn("* Hooks Results")
2614
      assert hooks_results, "invalid result from hooks"
2615

    
2616
      for node_name in hooks_results:
2617
        res = hooks_results[node_name]
2618
        msg = res.fail_msg
2619
        test = msg and not res.offline
2620
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
2621
                      "Communication failure in hooks execution: %s", msg)
2622
        if res.offline or msg:
2623
          # No need to investigate payload if node is offline or gave an error.
2624
          # override manually lu_result here as _ErrorIf only
2625
          # overrides self.bad
2626
          lu_result = 1
2627
          continue
2628
        for script, hkr, output in res.payload:
2629
          test = hkr == constants.HKR_FAIL
2630
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
2631
                        "Script %s failed, output:", script)
2632
          if test:
2633
            output = self._HOOKS_INDENT_RE.sub('      ', output)
2634
            feedback_fn("%s" % output)
2635
            lu_result = 0
2636

    
2637
      return lu_result
2638

    
2639

    
2640
class LUClusterVerifyDisks(NoHooksLU):
2641
  """Verifies the cluster disks status.
2642

2643
  """
2644
  REQ_BGL = False
2645

    
2646
  def ExpandNames(self):
2647
    self.needed_locks = {
2648
      locking.LEVEL_NODE: locking.ALL_SET,
2649
      locking.LEVEL_INSTANCE: locking.ALL_SET,
2650
    }
2651
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
2652

    
2653
  def Exec(self, feedback_fn):
2654
    """Verify integrity of cluster disks.
2655

2656
    @rtype: tuple of three items
2657
    @return: a tuple of (dict of node-to-node_error, list of instances
2658
        which need activate-disks, dict of instance: (node, volume) for
2659
        missing volumes)
2660

2661
    """
2662
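    # note: "result" aliases the three mutable containers created on the next
    # line, so filling them in below also fills in the returned tuple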
    result = res_nodes, res_instances, res_missing = {}, [], {}
2663

    
2664
    nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
2665
    instances = self.cfg.GetAllInstancesInfo().values()
2666

    
2667
    nv_dict = {}
2668
    for inst in instances:
2669
      inst_lvs = {}
2670
      if not inst.admin_up:
2671
        continue
2672
      inst.MapLVsByNode(inst_lvs)
2673
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
2674
      for node, vol_list in inst_lvs.iteritems():
2675
        for vol in vol_list:
2676
          nv_dict[(node, vol)] = inst
2677

    
2678
    if not nv_dict:
2679
      return result
2680

    
2681
    node_lvs = self.rpc.call_lv_list(nodes, [])
2682
    for node, node_res in node_lvs.items():
2683
      if node_res.offline:
2684
        continue
2685
      msg = node_res.fail_msg
2686
      if msg:
2687
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2688
        res_nodes[node] = msg
2689
        continue
2690

    
2691
      lvs = node_res.payload
2692
      for lv_name, (_, _, lv_online) in lvs.items():
2693
        inst = nv_dict.pop((node, lv_name), None)
2694
        if (not lv_online and inst is not None
2695
            and inst.name not in res_instances):
2696
          res_instances.append(inst.name)
2697

    
2698
    # any leftover items in nv_dict are missing LVs, let's arrange the
2699
    # data better
2700
    for key, inst in nv_dict.iteritems():
2701
      if inst.name not in res_missing:
2702
        res_missing[inst.name] = []
2703
      res_missing[inst.name].append(key)
2704

    
2705
    return result
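
  # Illustrative return value (names are made up):
  #
  #   ({"node3": "Error while running lvs ..."},  # per-node error messages
  #    ["inst1"],                                 # need activate-disks
  #    {"inst2": [("node2", "xenvg/disk0")]})     # missing (node, volume) pairs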
2706

    
2707

    
2708
class LUClusterRepairDiskSizes(NoHooksLU):
2709
  """Verifies the cluster disks sizes.
2710

2711
  """
2712
  REQ_BGL = False
2713

    
2714
  def ExpandNames(self):
2715
    if self.op.instances:
2716
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
2717
      self.needed_locks = {
2718
        locking.LEVEL_NODE: [],
2719
        locking.LEVEL_INSTANCE: self.wanted_names,
2720
        }
2721
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2722
    else:
2723
      self.wanted_names = None
2724
      self.needed_locks = {
2725
        locking.LEVEL_NODE: locking.ALL_SET,
2726
        locking.LEVEL_INSTANCE: locking.ALL_SET,
2727
        }
2728
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
2729

    
2730
  def DeclareLocks(self, level):
2731
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
2732
      self._LockInstancesNodes(primary_only=True)
2733

    
2734
  def CheckPrereq(self):
2735
    """Check prerequisites.
2736

2737
    This only checks the optional instance list against the existing names.
2738

2739
    """
2740
    if self.wanted_names is None:
2741
      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
2742

    
2743
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2744
                             in self.wanted_names]
2745

    
2746
  def _EnsureChildSizes(self, disk):
2747
    """Ensure children of the disk have the needed disk size.
2748

2749
    This is valid mainly for DRBD8 and fixes an issue where the
2750
    children have smaller disk size.
2751

2752
    @param disk: an L{ganeti.objects.Disk} object
2753

2754
    """
2755
    if disk.dev_type == constants.LD_DRBD8:
2756
      assert disk.children, "Empty children for DRBD8?"
2757
      fchild = disk.children[0]
2758
      mismatch = fchild.size < disk.size
2759
      if mismatch:
2760
        self.LogInfo("Child disk has size %d, parent %d, fixing",
2761
                     fchild.size, disk.size)
2762
        fchild.size = disk.size
2763

    
2764
      # and we recurse on this child only, not on the metadev
2765
      return self._EnsureChildSizes(fchild) or mismatch
2766
    else:
2767
      return False
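
  # Illustrative example: for a DRBD8 disk recorded at 10240 MiB whose first
  # child (the data LV) is recorded at 10236 MiB, the child is bumped to
  # 10240 and True is returned so the caller knows the configuration must be
  # written back; the metadata child is deliberately left untouched.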
2768

    
2769
  def Exec(self, feedback_fn):
2770
    """Verify the size of cluster disks.
2771

2772
    """
2773
    # TODO: check child disks too
2774
    # TODO: check differences in size between primary/secondary nodes
2775
    per_node_disks = {}
2776
    for instance in self.wanted_instances:
2777
      pnode = instance.primary_node
2778
      if pnode not in per_node_disks:
2779
        per_node_disks[pnode] = []
2780
      for idx, disk in enumerate(instance.disks):
2781
        per_node_disks[pnode].append((instance, idx, disk))
2782

    
2783
    changed = []
2784
    for node, dskl in per_node_disks.items():
2785
      newl = [v[2].Copy() for v in dskl]
2786
      for dsk in newl:
2787
        self.cfg.SetDiskID(dsk, node)
2788
      result = self.rpc.call_blockdev_getsize(node, newl)
2789
      if result.fail_msg:
2790
        self.LogWarning("Failure in blockdev_getsize call to node"
2791
                        " %s, ignoring", node)
2792
        continue
2793
      if len(result.payload) != len(dskl):
2794
        logging.warning("Invalid result from node %s: len(dskl)=%d,"
2795
                        " result.payload=%s", node, len(dskl), result.payload)
2796
        self.LogWarning("Invalid result from node %s, ignoring node results",
2797
                        node)
2798
        continue
2799
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
2800
        if size is None:
2801
          self.LogWarning("Disk %d of instance %s did not return size"
2802
                          " information, ignoring", idx, instance.name)
2803
          continue
2804
        if not isinstance(size, (int, long)):
2805
          self.LogWarning("Disk %d of instance %s did not return valid"
2806
                          " size information, ignoring", idx, instance.name)
2807
          continue
2808
        size = size >> 20
2809
        if size != disk.size:
2810
          self.LogInfo("Disk %d of instance %s has mismatched size,"
2811
                       " correcting: recorded %d, actual %d", idx,
2812
                       instance.name, disk.size, size)
2813
          disk.size = size
2814
          self.cfg.Update(instance, feedback_fn)
2815
          changed.append((instance.name, idx, size))
2816
        if self._EnsureChildSizes(disk):
2817
          self.cfg.Update(instance, feedback_fn)
2818
          changed.append((instance.name, idx, disk.size))
2819
    return changed
2820

    
2821

    
2822
class LUClusterRename(LogicalUnit):
2823
  """Rename the cluster.
2824

2825
  """
2826
  HPATH = "cluster-rename"
2827
  HTYPE = constants.HTYPE_CLUSTER
2828

    
2829
  def BuildHooksEnv(self):
2830
    """Build hooks env.
2831

2832
    """
2833
    return {
2834
      "OP_TARGET": self.cfg.GetClusterName(),
2835
      "NEW_NAME": self.op.name,
2836
      }
2837

    
2838
  def BuildHooksNodes(self):
2839
    """Build hooks nodes.
2840

2841
    """
2842
    return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())
2843

    
2844
  def CheckPrereq(self):
2845
    """Verify that the passed name is a valid one.
2846

2847
    """
2848
    hostname = netutils.GetHostname(name=self.op.name,
2849
                                    family=self.cfg.GetPrimaryIPFamily())
2850

    
2851
    new_name = hostname.name
2852
    self.ip = new_ip = hostname.ip
2853
    old_name = self.cfg.GetClusterName()
2854
    old_ip = self.cfg.GetMasterIP()
2855
    if new_name == old_name and new_ip == old_ip:
2856
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
2857
                                 " cluster has changed",
2858
                                 errors.ECODE_INVAL)
2859
    if new_ip != old_ip:
2860
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2861
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
2862
                                   " reachable on the network" %
2863
                                   new_ip, errors.ECODE_NOTUNIQUE)
2864

    
2865
    self.op.name = new_name
2866

    
2867
  def Exec(self, feedback_fn):
2868
    """Rename the cluster.
2869

2870
    """
2871
    clustername = self.op.name
2872
    ip = self.ip
2873

    
2874
    # shutdown the master IP
2875
    master = self.cfg.GetMasterNode()
2876
    result = self.rpc.call_node_stop_master(master, False)
2877
    result.Raise("Could not disable the master role")
2878

    
2879
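    # the master IP stays down while the new name/IP is written to the
    # configuration; the finally clause restarts the master role even if
    # the update fails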
    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetOnlineNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
    finally:
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)

    return clustername


class LUClusterSetParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    """
    if self.op.uid_pool:
      uidpool.CheckUidPool(self.op.uid_pool)

    if self.op.add_uids:
      uidpool.CheckUidPool(self.op.add_uids)

    if self.op.remove_uids:
      uidpool.CheckUidPool(self.op.remove_uids)

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                   " instances exist", errors.ECODE_INVAL)

    if self.op.drbd_helper is not None and not self.op.drbd_helper:
      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
        raise errors.OpPrereqError("Cannot disable drbd helper while"
                                   " drbd-based instances exist",
                                   errors.ECODE_INVAL)

    node_list = self.glm.list_owned(locking.LEVEL_NODE)

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].fail_msg
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus), errors.ECODE_ENVIRON)

    if self.op.drbd_helper:
      # checks given drbd helper on all nodes
      helpers = self.rpc.call_drbd_helper(node_list)
      for node in node_list:
        ninfo = self.cfg.GetNodeInfo(node)
        if ninfo.offline:
          self.LogInfo("Not checking drbd helper on offline node %s", node)
          continue
        msg = helpers[node].fail_msg
        if msg:
          raise errors.OpPrereqError("Error checking drbd helper on node"
                                     " '%s': %s" % (node, msg),
                                     errors.ECODE_ENVIRON)
        node_helper = helpers[node].payload
        if node_helper != self.op.drbd_helper:
          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
                                     (node, node_helper), errors.ECODE_ENVIRON)

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate params changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)

      # TODO: we need a more general way to handle resetting
      # cluster-level parameters to default values
      if self.new_ndparams["oob_program"] == "":
        self.new_ndparams["oob_program"] = \
            constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
      nic_errors = []

      # check all instances for consistency
      for instance in self.cfg.GetAllInstancesInfo().values():
        for nic_idx, nic in enumerate(instance.nics):
          params_copy = copy.deepcopy(nic.nicparams)
          params_filled = objects.FillDict(self.new_nicparams, params_copy)

          # check parameter syntax
          try:
            objects.NIC.CheckParameterSyntax(params_filled)
          except errors.ConfigurationError, err:
            nic_errors.append("Instance %s, nic/%d: %s" %
                              (instance.name, nic_idx, err))

          # if we're moving instances to routed, check that they have an ip
          target_mode = params_filled[constants.NIC_MODE]
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
                              " address" % (instance.name, nic_idx))
      if nic_errors:
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
                                   "\n".join(nic_errors))

    # hypervisor list/parameters
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    # os hypervisor parameters
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
    if self.op.os_hvp:
      for os_name, hvs in self.op.os_hvp.items():
        if os_name not in self.new_os_hvp:
          self.new_os_hvp[os_name] = hvs
        else:
          for hv_name, hv_dict in hvs.items():
            if hv_name not in self.new_os_hvp[os_name]:
              self.new_os_hvp[os_name][hv_name] = hv_dict
            else:
              self.new_os_hvp[os_name][hv_name].update(hv_dict)

    # os parameters
    self.new_osp = objects.FillDict(cluster.osparams, {})
    if self.op.osparams:
      for os_name, osp in self.op.osparams.items():
        if os_name not in self.new_osp:
          self.new_osp[os_name] = {}

        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
                                                  use_none=True)

        if not self.new_osp[os_name]:
          # we removed all parameters
          del self.new_osp[os_name]
        else:
          # check the parameter validity (remote check)
          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
                         os_name, self.new_osp[os_name])

    # changes to the hypervisor list
    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      for hv in self.hv_list:
        # if the hypervisor doesn't already exist in the cluster
        # hvparams, we initialize it to empty, and then (in both
        # cases) we make sure to fill the defaults, as we might not
        # have a complete defaults list if the hypervisor wasn't
        # enabled before
        if hv not in new_hvp:
          new_hvp[hv] = {}
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

    if self.op.os_hvp:
      # no need to check any newly-enabled hypervisors, since the
      # defaults have already been checked in the above code-block
      for os_name, os_hvp in self.new_os_hvp.items():
        for hv_name, hv_params in os_hvp.items():
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          # we need to fill in the new os_hvp on top of the actual hv_p
          cluster_defaults = self.new_hvparams.get(hv_name, {})
          new_osp = objects.FillDict(cluster_defaults, hv_params)
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(new_osp)
          _CheckHVParams(self, node_list, hv_name, new_osp)

    if self.op.default_iallocator:
      alloc_script = utils.FindFile(self.op.default_iallocator,
                                    constants.IALLOCATOR_SEARCH_PATH,
                                    os.path.isfile)
      if alloc_script is None:
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                   " specified" % self.op.default_iallocator,
                                   errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.drbd_helper is not None:
      new_helper = self.op.drbd_helper
      if not new_helper:
        new_helper = None
      if new_helper != self.cfg.GetDRBDHelper():
        self.cfg.SetDRBDHelper(new_helper)
      else:
        feedback_fn("Cluster DRBD helper already in desired state,"
                    " not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.os_hvp:
      self.cluster.os_hvp = self.new_os_hvp
    if self.op.enabled_hypervisors is not None:
      self.cluster.hvparams = self.new_hvparams
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
    if self.op.osparams:
      self.cluster.osparams = self.new_osp
    if self.op.ndparams:
      self.cluster.ndparams = self.new_ndparams

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self, [])

    if self.op.maintain_node_health is not None:
      self.cluster.maintain_node_health = self.op.maintain_node_health

    if self.op.prealloc_wipe_disks is not None:
      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks

    if self.op.add_uids is not None:
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)

    if self.op.remove_uids is not None:
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)

    if self.op.uid_pool is not None:
      self.cluster.uid_pool = self.op.uid_pool

    if self.op.default_iallocator is not None:
      self.cluster.default_iallocator = self.op.default_iallocator

    if self.op.reserved_lvs is not None:
      self.cluster.reserved_lvs = self.op.reserved_lvs

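    # helper applying DDM_ADD/DDM_REMOVE modifications to one of the
    # cluster-level OS lists (hidden_os/blacklisted_os below)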
    def helper_os(aname, mods, desc):
      desc += " OS list"
      lst = getattr(self.cluster, aname)
      for key, val in mods:
        if key == constants.DDM_ADD:
          if val in lst:
            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
          else:
            lst.append(val)
        elif key == constants.DDM_REMOVE:
          if val in lst:
            lst.remove(val)
          else:
            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
        else:
          raise errors.ProgrammerError("Invalid modification '%s'" % key)

    if self.op.hidden_os:
      helper_os("hidden_os", self.op.hidden_os, "hidden")

    if self.op.blacklisted_os:
      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")

    if self.op.master_netdev:
      master = self.cfg.GetMasterNode()
      feedback_fn("Shutting down master ip on the current netdev (%s)" %
                  self.cluster.master_netdev)
      result = self.rpc.call_node_stop_master(master, False)
      result.Raise("Could not disable the master ip")
      feedback_fn("Changing master_netdev from %s to %s" %
                  (self.cluster.master_netdev, self.op.master_netdev))
      self.cluster.master_netdev = self.op.master_netdev

    self.cfg.Update(self.cluster, feedback_fn)

    if self.op.master_netdev:
      feedback_fn("Starting the master ip on the new master netdev (%s)" %
                  self.op.master_netdev)
      result = self.rpc.call_node_start_master(master, False, False)
      if result.fail_msg:
        self.LogWarning("Could not re-enable the master ip on"
                        " the master, please restart manually: %s",
                        result.fail_msg)


def _UploadHelper(lu, nodes, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(nodes, fname)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, to_node, msg))
        lu.proc.LogWarning(msg)


def _ComputeAncillaryFiles(cluster, redist):
  """Compute files external to Ganeti which need to be consistent.

  @type redist: boolean
  @param redist: Whether to include files which need to be redistributed
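
  @rtype: tuple
  @return: (files_all, files_all_opt, files_mc, files_vm): files for all
      nodes, files that must exist on all nodes or on none, files for
      master candidates only and files for vm-capable nodes only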

  """
  # Compute files for all nodes
  files_all = set([
    constants.SSH_KNOWN_HOSTS_FILE,
    constants.CONFD_HMAC_KEY,
    constants.CLUSTER_DOMAIN_SECRET_FILE,
    ])

  if not redist:
    files_all.update(constants.ALL_CERT_FILES)
    files_all.update(ssconf.SimpleStore().GetFileList())

  if cluster.modify_etc_hosts:
    files_all.add(constants.ETC_HOSTS)

  # Files which must either exist on all nodes or on none
  files_all_opt = set([
    constants.RAPI_USERS_FILE,
    ])

  # Files which should only be on master candidates
  files_mc = set()
  if not redist:
    files_mc.add(constants.CLUSTER_CONF_FILE)

  # Files which should only be on VM-capable nodes
  files_vm = set(filename
    for hv_name in cluster.enabled_hypervisors
    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles())

  # Filenames must be unique
  assert (len(files_all | files_all_opt | files_mc | files_vm) ==
          sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
         "Found file listed in more than one file list"

  return (files_all, files_all_opt, files_mc, files_vm)


def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to
  @type additional_vm: boolean
  @param additional_vm: whether the additional nodes are vm-capable or not

  """
  # Gather target nodes
  cluster = lu.cfg.GetClusterInfo()
  master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())

  online_nodes = lu.cfg.GetOnlineNodeList()
  vm_nodes = lu.cfg.GetVmCapableNodeList()

  if additional_nodes is not None:
    online_nodes.extend(additional_nodes)
    if additional_vm:
      vm_nodes.extend(additional_nodes)

  # Never distribute to master node
  for nodelist in [online_nodes, vm_nodes]:
    if master_info.name in nodelist:
      nodelist.remove(master_info.name)

  # Gather file lists
  (files_all, files_all_opt, files_mc, files_vm) = \
    _ComputeAncillaryFiles(cluster, True)

  # Never re-distribute configuration file from here
  assert not (constants.CLUSTER_CONF_FILE in files_all or
              constants.CLUSTER_CONF_FILE in files_vm)
  assert not files_mc, "Master candidates not handled in this function"

  filemap = [
    (online_nodes, files_all),
    (online_nodes, files_all_opt),
    (vm_nodes, files_vm),
    ]

  # Upload the files
  for (node_list, files) in filemap:
    for fname in files:
      _UploadHelper(lu, node_list, fname)


class LUClusterRedistConf(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    _RedistributeAncillaryFiles(self)


def _WaitForSync(lu, instance, disks=None, oneshot=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks or disks is not None and not disks:
    return True

  disks = _ExpandCheckDisks(instance, disks)

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in disks:
    lu.cfg.SetDiskID(dev, node)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = ("%s remaining (estimated)" %
                      utils.FormatSeconds(mstat.estimated_time))
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (disks[i].iv_name, mstat.sync_percent, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

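    # sleep for the estimated remaining sync time, but poll at least
    # once per minute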
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

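  # note: child devices are checked with the default is_degraded test;
  # the ldisk flag is not propagated to the recursive call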
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUOobCommand(NoHooksLU):
  """Logical unit for OOB handling.

  """
  REQ_BGL = False
  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)

  def ExpandNames(self):
    """Gather locks we need.

    """
    if self.op.node_names:
      self.op.node_names = _GetWantedNodes(self, self.op.node_names)
      lock_names = self.op.node_names
    else:
      lock_names = locking.ALL_SET

    self.needed_locks = {
      locking.LEVEL_NODE: lock_names,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - OOB is supported

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.nodes = []
    self.master_node = self.cfg.GetMasterNode()

    assert self.op.power_delay >= 0.0

    if self.op.node_names:
      if (self.op.command in self._SKIP_MASTER and
          self.master_node in self.op.node_names):
        master_node_obj = self.cfg.GetNodeInfo(self.master_node)
        master_oob_handler = _SupportsOob(self.cfg, master_node_obj)

        if master_oob_handler:
          additional_text = ("run '%s %s %s' if you want to operate on the"
                             " master regardless") % (master_oob_handler,
                                                      self.op.command,
                                                      self.master_node)
        else:
          additional_text = "it does not support out-of-band operations"

        raise errors.OpPrereqError(("Operating on the master node %s is not"
                                    " allowed for %s; %s") %
                                   (self.master_node, self.op.command,
                                    additional_text), errors.ECODE_INVAL)
    else:
      self.op.node_names = self.cfg.GetNodeList()
      if self.op.command in self._SKIP_MASTER:
        self.op.node_names.remove(self.master_node)

    if self.op.command in self._SKIP_MASTER:
      assert self.master_node not in self.op.node_names

    for node_name in self.op.node_names:
      node = self.cfg.GetNodeInfo(node_name)

      if node is None:
        raise errors.OpPrereqError("Node %s not found" % node_name,
                                   errors.ECODE_NOENT)
      else:
        self.nodes.append(node)

      if (not self.op.ignore_status and
          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                    " not marked offline") % node_name,
                                   errors.ECODE_STATE)

  def Exec(self, feedback_fn):
    """Execute OOB and return result if we expect any.

    """
    master_node = self.master_node
    ret = []

    for idx, node in enumerate(utils.NiceSort(self.nodes,
                                              key=lambda node: node.name)):
      node_entry = [(constants.RS_NORMAL, node.name)]
      ret.append(node_entry)

      oob_program = _SupportsOob(self.cfg, node)

      if not oob_program:
        node_entry.append((constants.RS_UNAVAIL, None))
        continue

      logging.info("Executing out-of-band command '%s' using '%s' on %s",
                   self.op.command, oob_program, node.name)
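      # the out-of-band helper is always invoked via the master node's
      # daemon, which runs it against the target node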
      result = self.rpc.call_run_oob(master_node, oob_program,
                                     self.op.command, node.name,
                                     self.op.timeout)

      if result.fail_msg:
        self.LogWarning("Out-of-band RPC failed on node '%s': %s",
                        node.name, result.fail_msg)
        node_entry.append((constants.RS_NODATA, None))
      else:
        try:
          self._CheckPayload(result)
        except errors.OpExecError, err:
          self.LogWarning("Payload returned by node '%s' is not valid: %s",
                          node.name, err)
          node_entry.append((constants.RS_NODATA, None))
        else:
          if self.op.command == constants.OOB_HEALTH:
            # For health we should log important events
            for item, status in result.payload:
              if status in [constants.OOB_STATUS_WARNING,
                            constants.OOB_STATUS_CRITICAL]:
                self.LogWarning("Item '%s' on node '%s' has status '%s'",
                                item, node.name, status)

          if self.op.command == constants.OOB_POWER_ON:
            node.powered = True
          elif self.op.command == constants.OOB_POWER_OFF:
            node.powered = False
          elif self.op.command == constants.OOB_POWER_STATUS:
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
            if powered != node.powered:
              logging.warning(("Recorded power state (%s) of node '%s' does not"
                               " match actual power state (%s)"), node.powered,
                              node.name, powered)

          # For configuration changing commands we should update the node
          if self.op.command in (constants.OOB_POWER_ON,
                                 constants.OOB_POWER_OFF):
            self.cfg.Update(node, feedback_fn)

          node_entry.append((constants.RS_NORMAL, result.payload))

          if (self.op.command == constants.OOB_POWER_ON and
              idx < len(self.nodes) - 1):
            time.sleep(self.op.power_delay)

    return ret

  def _CheckPayload(self, result):
    """Checks if the payload is valid.

    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid

    """
    errs = []
    if self.op.command == constants.OOB_HEALTH:
      if not isinstance(result.payload, list):
        errs.append("command 'health' is expected to return a list but got %s" %
                    type(result.payload))
      else:
        for item, status in result.payload:
          if status not in constants.OOB_STATUSES:
            errs.append("health item '%s' has invalid status '%s'" %
                        (item, status))

    if self.op.command == constants.OOB_POWER_STATUS:
      if not isinstance(result.payload, dict):
        errs.append("power-status is expected to return a dict but got %s" %
                    type(result.payload))

    if self.op.command in [
        constants.OOB_POWER_ON,
        constants.OOB_POWER_OFF,
        constants.OOB_POWER_CYCLE,
        ]:
      if result.payload is not None:
        errs.append("%s is expected to not return payload but got '%s'" %
                    (self.op.command, result.payload))

    if errs:
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                               utils.CommaJoin(errs))


class _OsQuery(_QueryBase):
  FIELDS = query.OS_FIELDS

  def ExpandNames(self, lu):
    # Lock all nodes in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    lu.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    # The following variables interact with _QueryBase._GetNames
    if self.names:
      self.wanted = self.names
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self.use_locking

  def DeclareLocks(self, lu, level):
    pass

  @staticmethod
  def _DiagnoseByOS(rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another
        map, with nodes as keys and tuples of (path, status, diagnose,
        variants, parameters, api_versions) as values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "", [], [])]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for (name, path, status, diagnose, variants,
           params, api_versions) in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        # convert params from [name, help] to (name, help)
        params = [tuple(v) for v in params]
        all_os[name][node_name].append((path, status, diagnose,
                                        variants, params, api_versions))
    return all_os

  def _GetQueryData(self, lu):
    """Computes the list of OSes and their attributes.

    """
    # Locking is not used
    assert not (compat.any(lu.glm.is_owned(level)
                           for level in locking.LEVELS
                           if level != locking.LEVEL_CLUSTER) or
                self.do_locking or self.use_locking)

    valid_nodes = [node.name
                   for node in lu.cfg.GetAllNodesInfo().values()
                   if not node.offline and node.vm_capable]
    pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
    cluster = lu.cfg.GetClusterInfo()

    data = {}

    for (os_name, os_data) in pol.items():
      info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
                          hidden=(os_name in cluster.hidden_os),
                          blacklisted=(os_name in cluster.blacklisted_os))

      variants = set()
      parameters = set()
      api_versions = set()

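      # each per-node entry is a (path, status, diagnose, variants, params,
      # api_versions) tuple as built by _DiagnoseByOS; the status flag of
      # the first entry decides validity on that node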
      for idx, osl in enumerate(os_data.values()):
        info.valid = bool(info.valid and osl and osl[0][1])
        if not info.valid:
          break

        (node_variants, node_params, node_api) = osl[0][3:6]
        if idx == 0:
          # First entry
          variants.update(node_variants)
          parameters.update(node_params)
          api_versions.update(node_api)
        else:
          # Filter out inconsistent values
          variants.intersection_update(node_variants)
          parameters.intersection_update(node_params)
          api_versions.intersection_update(node_api)

      info.variants = list(variants)
      info.parameters = list(parameters)
      info.api_versions = list(api_versions)

      data[os_name] = info

    # Prepare data in requested order
    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
            if name in data]


class LUOsDiagnose(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  REQ_BGL = False

  @staticmethod
  def _BuildFilter(fields, names):
    """Builds a filter for querying OSes.

    """
3795
    name_filter = qlang.MakeSimpleFilter("name", names)
3796

    
3797
    # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
3798
    # respective field is not requested
3799
    status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
3800
                     for fname in ["hidden", "blacklisted"]
3801
                     if fname not in fields]
3802
    if "valid" not in fields:
3803
      status_filter.append([qlang.OP_TRUE, "valid"])
3804

    
3805
    if status_filter:
3806
      status_filter.insert(0, qlang.OP_AND)
3807
    else:
3808
      status_filter = None
3809

    
3810
    if name_filter and status_filter:
3811
      return [qlang.OP_AND, name_filter, status_filter]
3812
    elif name_filter:
3813
      return name_filter
3814
    else:
3815
      return status_filter
3816

    
3817
  def CheckArguments(self):
3818
    self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
3819
                       self.op.output_fields, False)
3820

    
3821
  def ExpandNames(self):
3822
    self.oq.ExpandNames(self)
3823

    
3824
  def Exec(self, feedback_fn):
3825
    return self.oq.OldStyleQuery(self)
3826

    
3827

    
3828
class LUNodeRemove(LogicalUnit):
3829
  """Logical unit for removing a node.
3830

3831
  """
3832
  HPATH = "node-remove"
3833
  HTYPE = constants.HTYPE_NODE
3834

    
3835
  def BuildHooksEnv(self):
3836
    """Build hooks env.
3837

3838
    This doesn't run on the target node in the pre phase as a failed
3839
    node would then be impossible to remove.
3840

3841
    """
3842
    return {
3843
      "OP_TARGET": self.op.node_name,
3844
      "NODE_NAME": self.op.node_name,
3845
      }
3846

    
3847
  def BuildHooksNodes(self):
3848
    """Build hooks nodes.
3849

3850
    """
3851
    all_nodes = self.cfg.GetNodeList()
3852
    try:
3853
      all_nodes.remove(self.op.node_name)
3854
    except ValueError:
3855
      logging.warning("Node '%s', which is about to be removed, was not found"
3856
                      " in the list of all nodes", self.op.node_name)
3857
    return (all_nodes, all_nodes)
3858

    
3859
  def CheckPrereq(self):
3860
    """Check prerequisites.
3861

3862
    This checks:
3863
     - the node exists in the configuration
3864
     - it does not have primary or secondary instances
3865
     - it's not the master
3866

3867
    Any errors are signaled by raising errors.OpPrereqError.
3868

3869
    """
3870
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3871
    node = self.cfg.GetNodeInfo(self.op.node_name)
3872
    assert node is not None
3873

    
3874
    instance_list = self.cfg.GetInstanceList()
3875

    
3876
    masternode = self.cfg.GetMasterNode()
3877
    if node.name == masternode:
3878
      raise errors.OpPrereqError("Node is the master node, failover to another"
3879
                                 " node is required", errors.ECODE_INVAL)
3880

    
3881
    for instance_name in instance_list:
3882
      instance = self.cfg.GetInstanceInfo(instance_name)
3883
      if node.name in instance.all_nodes:
3884
        raise errors.OpPrereqError("Instance %s is still running on the node,"
3885
                                   " please remove first" % instance_name,
3886
                                   errors.ECODE_INVAL)
3887
    self.op.node_name = node.name
3888
    self.node = node
3889

    
3890
  def Exec(self, feedback_fn):
3891
    """Removes the node from the cluster.
3892

3893
    """
3894
    node = self.node
3895
    logging.info("Stopping the node daemon and removing configs from node %s",
3896
                 node.name)
3897

    
3898
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
3899

    
3900
    # Promote nodes to master candidate as needed
3901
    _AdjustCandidatePool(self, exceptions=[node.name])
3902
    self.context.RemoveNode(node.name)
3903

    
3904
    # Run post hooks on the node before it's removed
3905
    _RunPostHook(self, node.name)
3906

    
3907
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
3908
    msg = result.fail_msg
3909
    if msg:
3910
      self.LogWarning("Errors encountered on the remote node while leaving"
3911
                      " the cluster: %s", msg)
3912

    
3913
    # Remove node from our /etc/hosts
3914
    if self.cfg.GetClusterInfo().modify_etc_hosts:
3915
      master_node = self.cfg.GetMasterNode()
3916
      result = self.rpc.call_etc_hosts_modify(master_node,
3917
                                              constants.ETC_HOSTS_REMOVE,
3918
                                              node.name, None)
3919
      result.Raise("Can't update hosts file with new host data")
3920
      _RedistributeAncillaryFiles(self)
3921

    
3922

    
3923
class _NodeQuery(_QueryBase):
3924
  FIELDS = query.NODE_FIELDS
3925

    
3926
  def ExpandNames(self, lu):
3927
    lu.needed_locks = {}
3928
    lu.share_locks[locking.LEVEL_NODE] = 1
3929

    
3930
    if self.names:
3931
      self.wanted = _GetWantedNodes(lu, self.names)
3932
    else:
3933
      self.wanted = locking.ALL_SET
3934

    
3935
    self.do_locking = (self.use_locking and
3936
                       query.NQ_LIVE in self.requested_data)
3937

    
3938
    if self.do_locking:
3939
      # if we don't request only static fields, we need to lock the nodes
3940
      lu.needed_locks[locking.LEVEL_NODE] = self.wanted
3941

    
3942
  def DeclareLocks(self, lu, level):
3943
    pass
3944

    
3945
  def _GetQueryData(self, lu):
3946
    """Computes the list of nodes and their attributes.
3947

3948
    """
3949
    all_info = lu.cfg.GetAllNodesInfo()
3950

    
3951
    nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)
3952

    
3953
    # Gather data as requested
3954
    if query.NQ_LIVE in self.requested_data:
3955
      # filter out non-vm_capable nodes
3956
      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
3957

    
3958
      node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
3959
                                        lu.cfg.GetHypervisorType())
3960
      live_data = dict((name, nresult.payload)
3961
                       for (name, nresult) in node_data.items()
3962
                       if not nresult.fail_msg and nresult.payload)
3963
    else:
3964
      live_data = None
3965

    
3966
    if query.NQ_INST in self.requested_data:
3967
      node_to_primary = dict([(name, set()) for name in nodenames])
3968
      node_to_secondary = dict([(name, set()) for name in nodenames])
3969

    
3970
      inst_data = lu.cfg.GetAllInstancesInfo()
3971

    
3972
      for inst in inst_data.values():
3973
        if inst.primary_node in node_to_primary:
3974
          node_to_primary[inst.primary_node].add(inst.name)
3975
        for secnode in inst.secondary_nodes:
3976
          if secnode in node_to_secondary:
3977
            node_to_secondary[secnode].add(inst.name)
3978
    else:
3979
      node_to_primary = None
3980
      node_to_secondary = None
3981

    
3982
    if query.NQ_OOB in self.requested_data:
3983
      oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
3984
                         for name, node in all_info.iteritems())
3985
    else:
3986
      oob_support = None
3987

    
3988
    if query.NQ_GROUP in self.requested_data:
3989
      groups = lu.cfg.GetAllNodeGroupsInfo()
3990
    else:
3991
      groups = {}
3992

    
3993
    return query.NodeQueryData([all_info[name] for name in nodenames],
3994
                               live_data, lu.cfg.GetMasterNode(),
3995
                               node_to_primary, node_to_secondary, groups,
3996
                               oob_support, lu.cfg.GetClusterInfo())
3997

    
3998

    
3999
class LUNodeQuery(NoHooksLU):
4000
  """Logical unit for querying nodes.
4001

4002
  """
4003
  # pylint: disable-msg=W0142
4004
  REQ_BGL = False
4005

    
4006
  def CheckArguments(self):
4007
    self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
4008
                         self.op.output_fields, self.op.use_locking)
4009

    
4010
  def ExpandNames(self):
4011
    self.nq.ExpandNames(self)
4012

    
4013
  def Exec(self, feedback_fn):
4014
    return self.nq.OldStyleQuery(self)
4015

    
4016

    
4017
class LUNodeQueryvols(NoHooksLU):
4018
  """Logical unit for getting volumes on node(s).
4019

4020
  """
4021
  REQ_BGL = False
4022
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
4023
  _FIELDS_STATIC = utils.FieldSet("node")
4024

    
4025
  def CheckArguments(self):
4026
    _CheckOutputFields(static=self._FIELDS_STATIC,
4027
                       dynamic=self._FIELDS_DYNAMIC,
4028
                       selected=self.op.output_fields)
4029

    
4030
  def ExpandNames(self):
4031
    self.needed_locks = {}
4032
    self.share_locks[locking.LEVEL_NODE] = 1
4033
    if not self.op.nodes:
4034
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4035
    else:
4036
      self.needed_locks[locking.LEVEL_NODE] = \
4037
        _GetWantedNodes(self, self.op.nodes)
4038

    
4039
  def Exec(self, feedback_fn):
4040
    """Computes the list of nodes and their attributes.
4041

4042
    """
4043
    nodenames = self.glm.list_owned(locking.LEVEL_NODE)
4044
    volumes = self.rpc.call_node_volumes(nodenames)
4045

    
4046
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
4047
             in self.cfg.GetInstanceList()]
4048

    
4049
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
4050

    
4051
    output = []
4052
    for node in nodenames:
4053
      nresult = volumes[node]
4054
      if nresult.offline:
4055
        continue
4056
      msg = nresult.fail_msg
4057
      if msg:
4058
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
4059
        continue
4060

    
4061
      node_vols = nresult.payload[:]
4062
      node_vols.sort(key=lambda vol: vol['dev'])
4063

    
4064
      for vol in node_vols:
4065
        node_output = []
4066
        for field in self.op.output_fields:
4067
          if field == "node":
4068
            val = node
4069
          elif field == "phys":
4070
            val = vol['dev']
4071
          elif field == "vg":
4072
            val = vol['vg']
4073
          elif field == "name":
4074
            val = vol['name']
4075
          elif field == "size":
4076
            val = int(float(vol['size']))
4077
          elif field == "instance":
4078
            for inst in ilist:
4079
              if node not in lv_by_node[inst]:
4080
                continue
4081
              if vol['name'] in lv_by_node[inst][node]:
4082
                val = inst.name
4083
                break
4084
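            # for/else: only reached when no instance claims this volume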
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


class LUNodeQueryStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
  REQ_BGL = False

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)

    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result


class _InstanceQuery(_QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks[locking.LEVEL_INSTANCE] = 1
    lu.share_locks[locking.LEVEL_NODE] = 1

    if self.names:
      self.wanted = _GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, lu, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      lu._LockInstancesNodes() # pylint: disable-msg=W0212

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    cluster = lu.cfg.GetClusterInfo()
    all_info = lu.cfg.GetAllInstancesInfo()

    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)

    instance_list = [all_info[name] for name in instance_names]
    nodes = frozenset(itertools.chain(*(inst.all_nodes
                                        for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_nodes = []
    offline_nodes = []
    wrongnode_inst = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if inst in all_info:
              if all_info[inst].primary_node == name:
                live_data.update(result.payload)
              else:
                wrongnode_inst.add(inst)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst, name)
        # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      disk_usage = dict((inst.name,
                         _ComputeDiskSize(inst.disk_template,
                                          [{constants.IDISK_SIZE: disk.size}
                                           for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_nodes, bad_nodes,
                                   live_data, wrongnode_inst, consinfo)


class LUQuery(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    qcls = _GetQueryImplementation(self.op.what)

    self.impl = qcls(self.op.filter, self.op.fields, False)

  def ExpandNames(self):
    self.impl.ExpandNames(self)

  def DeclareLocks(self, level):
    self.impl.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.impl.NewStyleQuery(self)


class LUQueryFields(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable-msg=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.qcls = _GetQueryImplementation(self.op.what)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    return query.QueryFields(self.qcls.FIELDS, self.op.fields)


class LUNodeModifyStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def Exec(self, feedback_fn):
    """Modifies a storage unit on the given node.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUNodeAdd(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _NFLAGS = ["master_capable", "vm_capable"]

  def CheckArguments(self):
    self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
    # validate/normalize the node name
    self.hostname = netutils.GetHostname(name=self.op.node_name,
                                         family=self.primary_ip_family)
    self.op.node_name = self.hostname.name

    if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
      raise errors.OpPrereqError("Cannot readd the master node",
                                 errors.ECODE_STATE)

    if self.op.readd and self.op.group:
      raise errors.OpPrereqError("Cannot pass a node group when a node is"
                                 " being readded", errors.ECODE_INVAL)
4376

    
4377
  def BuildHooksEnv(self):
4378
    """Build hooks env.
4379

4380
    This will run on all nodes before, and on all nodes + the new node after.
4381

4382
    """
4383
    return {
4384
      "OP_TARGET": self.op.node_name,
4385
      "NODE_NAME": self.op.node_name,
4386
      "NODE_PIP": self.op.primary_ip,
4387
      "NODE_SIP": self.op.secondary_ip,
4388
      "MASTER_CAPABLE": str(self.op.master_capable),
4389
      "VM_CAPABLE": str(self.op.vm_capable),
4390
      }
4391

    
4392
  def BuildHooksNodes(self):
4393
    """Build hooks nodes.
4394

4395
    """
4396
    # Exclude added node
4397
    pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
4398
    post_nodes = pre_nodes + [self.op.node_name, ]
4399

    
4400
    return (pre_nodes, post_nodes)
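    # Example of the value returned above (hypothetical node names): with
    # existing nodes "node1" and "node2" and a new node "node3", the hooks
    # run on (["node1", "node2"], ["node1", "node2", "node3"]).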
  def CheckPrereq(self):
4403
    """Check prerequisites.
4404

4405
    This checks:
4406
     - the new node is not already in the config
4407
     - it is resolvable
4408
     - its parameters (single/dual homed) matches the cluster
4409

4410
    Any errors are signaled by raising errors.OpPrereqError.
4411

4412
    """
4413
    cfg = self.cfg
4414
    hostname = self.hostname
4415
    node = hostname.name
4416
    primary_ip = self.op.primary_ip = hostname.ip
4417
    if self.op.secondary_ip is None:
4418
      if self.primary_ip_family == netutils.IP6Address.family:
4419
        raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
4420
                                   " IPv4 address must be given as secondary",
4421
                                   errors.ECODE_INVAL)
4422
      self.op.secondary_ip = primary_ip
4423

    
4424
    secondary_ip = self.op.secondary_ip
4425
    if not netutils.IP4Address.IsValid(secondary_ip):
4426
      raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4427
                                 " address" % secondary_ip, errors.ECODE_INVAL)
4428

    
4429
    node_list = cfg.GetNodeList()
4430
    if not self.op.readd and node in node_list:
4431
      raise errors.OpPrereqError("Node %s is already in the configuration" %
4432
                                 node, errors.ECODE_EXISTS)
4433
    elif self.op.readd and node not in node_list:
4434
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
4435
                                 errors.ECODE_NOENT)
4436

    
4437
    self.changed_primary_ip = False
4438

    
4439
    for existing_node_name in node_list:
4440
      existing_node = cfg.GetNodeInfo(existing_node_name)
4441

    
4442
      if self.op.readd and node == existing_node_name:
4443
        if existing_node.secondary_ip != secondary_ip:
4444
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
4445
                                     " address configuration as before",
4446
                                     errors.ECODE_INVAL)
4447
        if existing_node.primary_ip != primary_ip:
4448
          self.changed_primary_ip = True
4449

    
4450
        continue
4451

    
4452
      if (existing_node.primary_ip == primary_ip or
4453
          existing_node.secondary_ip == primary_ip or
4454
          existing_node.primary_ip == secondary_ip or
4455
          existing_node.secondary_ip == secondary_ip):
4456
        raise errors.OpPrereqError("New node ip address(es) conflict with"
4457
                                   " existing node %s" % existing_node.name,
4458
                                   errors.ECODE_NOTUNIQUE)
4459

    
4460
    # After this 'if' block, None is no longer a valid value for the
4461
    # _capable op attributes
4462
    if self.op.readd:
4463
      old_node = self.cfg.GetNodeInfo(node)
4464
      assert old_node is not None, "Can't retrieve locked node %s" % node
4465
      for attr in self._NFLAGS:
4466
        if getattr(self.op, attr) is None:
4467
          setattr(self.op, attr, getattr(old_node, attr))
4468
    else:
4469
      for attr in self._NFLAGS:
4470
        if getattr(self.op, attr) is None:
4471
          setattr(self.op, attr, True)
4472

    
4473
    if self.op.readd and not self.op.vm_capable:
4474
      pri, sec = cfg.GetNodeInstances(node)
4475
      if pri or sec:
4476
        raise errors.OpPrereqError("Node %s being re-added with vm_capable"
4477
                                   " flag set to false, but it already holds"
4478
                                   " instances" % node,
4479
                                   errors.ECODE_STATE)
4480

    
4481
    # check that the type of the node (single versus dual homed) is the
4482
    # same as for the master
4483
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
4484
    master_singlehomed = myself.secondary_ip == myself.primary_ip
4485
    newbie_singlehomed = secondary_ip == primary_ip
4486
    if master_singlehomed != newbie_singlehomed:
4487
      if master_singlehomed:
4488
        raise errors.OpPrereqError("The master has no secondary ip but the"
4489
                                   " new node has one",
4490
                                   errors.ECODE_INVAL)
4491
      else:
4492
        raise errors.OpPrereqError("The master has a secondary ip but the"
4493
                                   " new node doesn't have one",
4494
                                   errors.ECODE_INVAL)
4495

    
4496
    # checks reachability
4497
    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
4498
      raise errors.OpPrereqError("Node not reachable by ping",
4499
                                 errors.ECODE_ENVIRON)
4500

    
4501
    if not newbie_singlehomed:
4502
      # check reachability from my secondary ip to newbie's secondary ip
4503
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
4504
                           source=myself.secondary_ip):
4505
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
4506
                                   " based ping to node daemon port",
4507
                                   errors.ECODE_ENVIRON)
4508

    
4509
    if self.op.readd:
4510
      exceptions = [node]
4511
    else:
4512
      exceptions = []
4513

    
4514
    if self.op.master_capable:
4515
      self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
4516
    else:
4517
      self.master_candidate = False
4518

    
4519
    if self.op.readd:
4520
      self.new_node = old_node
4521
    else:
4522
      node_group = cfg.LookupNodeGroup(self.op.group)
4523
      self.new_node = objects.Node(name=node,
4524
                                   primary_ip=primary_ip,
4525
                                   secondary_ip=secondary_ip,
4526
                                   master_candidate=self.master_candidate,
4527
                                   offline=False, drained=False,
4528
                                   group=node_group)
4529

    
4530
    if self.op.ndparams:
4531
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
4532

    
4533
  def Exec(self, feedback_fn):
4534
    """Adds the new node to the cluster.
4535

4536
    """
4537
    new_node = self.new_node
4538
    node = new_node.name
4539

    
4540
    # We are adding a new node, so we assume it is powered
4541
    new_node.powered = True
4542

    
4543
    # for re-adds, reset the offline/drained/master-candidate flags;
4544
    # we need to reset here, otherwise offline would prevent RPC calls
4545
    # later in the procedure; this also means that if the re-add
4546
    # fails, we are left with a non-offlined, broken node
4547
    if self.op.readd:
4548
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
4549
      self.LogInfo("Readding a node, the offline/drained flags were reset")
4550
      # if we demote the node, we do cleanup later in the procedure
4551
      new_node.master_candidate = self.master_candidate
4552
      if self.changed_primary_ip:
4553
        new_node.primary_ip = self.op.primary_ip
4554

    
4555
    # copy the master/vm_capable flags
4556
    for attr in self._NFLAGS:
4557
      setattr(new_node, attr, getattr(self.op, attr))
4558

    
4559
    # notify the user about any possible mc promotion
4560
    if new_node.master_candidate:
4561
      self.LogInfo("Node will be a master candidate")
4562

    
4563
    if self.op.ndparams:
4564
      new_node.ndparams = self.op.ndparams
4565
    else:
4566
      new_node.ndparams = {}
4567

    
4568
    # check connectivity
4569
    result = self.rpc.call_version([node])[node]
4570
    result.Raise("Can't get version information from node %s" % node)
4571
    if constants.PROTOCOL_VERSION == result.payload:
4572
      logging.info("Communication to node %s fine, sw version %s match",
4573
                   node, result.payload)
4574
    else:
4575
      raise errors.OpExecError("Version mismatch master version %s,"
4576
                               " node version %s" %
4577
                               (constants.PROTOCOL_VERSION, result.payload))
4578

    
4579
    # Add node to our /etc/hosts, and add key to known_hosts
4580
    if self.cfg.GetClusterInfo().modify_etc_hosts:
4581
      master_node = self.cfg.GetMasterNode()
4582
      result = self.rpc.call_etc_hosts_modify(master_node,
4583
                                              constants.ETC_HOSTS_ADD,
4584
                                              self.hostname.name,
4585
                                              self.hostname.ip)
4586
      result.Raise("Can't update hosts file with new host data")
4587

    
4588
    if new_node.secondary_ip != new_node.primary_ip:
4589
      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
4590
                               False)
4591

    
4592
    node_verify_list = [self.cfg.GetMasterNode()]
4593
    node_verify_param = {
4594
      constants.NV_NODELIST: [node],
4595
      # TODO: do a node-net-test as well?
4596
    }
4597

    
4598
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
4599
                                       self.cfg.GetClusterName())
4600
    for verifier in node_verify_list:
4601
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
4602
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
4603
      if nl_payload:
4604
        for failed in nl_payload:
4605
          feedback_fn("ssh/hostname verification failed"
4606
                      " (checking from %s): %s" %
4607
                      (verifier, nl_payload[failed]))
4608
        raise errors.OpExecError("ssh/hostname verification failed")
4609

    
4610
    if self.op.readd:
4611
      _RedistributeAncillaryFiles(self)
4612
      self.context.ReaddNode(new_node)
4613
      # make sure we redistribute the config
4614
      self.cfg.Update(new_node, feedback_fn)
4615
      # and make sure the new node will not have old files around
4616
      if not new_node.master_candidate:
4617
        result = self.rpc.call_node_demote_from_mc(new_node.name)
4618
        msg = result.fail_msg
4619
        if msg:
4620
          self.LogWarning("Node failed to demote itself from master"
4621
                          " candidate status: %s" % msg)
4622
    else:
4623
      _RedistributeAncillaryFiles(self, additional_nodes=[node],
4624
                                  additional_vm=self.op.vm_capable)
4625
      self.context.AddNode(new_node, self.proc.GetECId())
4626

    
4627

    
4628
class LUNodeSetParams(LogicalUnit):
4629
  """Modifies the parameters of a node.
4630

4631
  @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
4632
      to the node role (as _ROLE_*)
4633
  @cvar _R2F: a dictionary from node role to tuples of flags
4634
  @cvar _FLAGS: a list of attribute names corresponding to the flags
4635

4636
  """
4637
  HPATH = "node-modify"
4638
  HTYPE = constants.HTYPE_NODE
4639
  REQ_BGL = False
4640
  (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
4641
  _F2R = {
4642
    (True, False, False): _ROLE_CANDIDATE,
4643
    (False, True, False): _ROLE_DRAINED,
4644
    (False, False, True): _ROLE_OFFLINE,
4645
    (False, False, False): _ROLE_REGULAR,
4646
    }
4647
  _R2F = dict((v, k) for k, v in _F2R.items())
4648
  _FLAGS = ["master_candidate", "drained", "offline"]
  def CheckArguments(self):
4651
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4652
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
4653
                self.op.master_capable, self.op.vm_capable,
4654
                self.op.secondary_ip, self.op.ndparams]
4655
    if all_mods.count(None) == len(all_mods):
4656
      raise errors.OpPrereqError("Please pass at least one modification",
4657
                                 errors.ECODE_INVAL)
4658
    if all_mods.count(True) > 1:
4659
      raise errors.OpPrereqError("Can't set the node into more than one"
4660
                                 " state at the same time",
4661
                                 errors.ECODE_INVAL)
4662

    
4663
    # Boolean value that tells us whether we might be demoting from MC
4664
    self.might_demote = (self.op.master_candidate == False or
4665
                         self.op.offline == True or
4666
                         self.op.drained == True or
4667
                         self.op.master_capable == False)
4668

    
4669
    if self.op.secondary_ip:
4670
      if not netutils.IP4Address.IsValid(self.op.secondary_ip):
4671
        raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
4672
                                   " address" % self.op.secondary_ip,
4673
                                   errors.ECODE_INVAL)
4674

    
4675
    self.lock_all = self.op.auto_promote and self.might_demote
4676
    self.lock_instances = self.op.secondary_ip is not None
4677

    
4678
  def ExpandNames(self):
4679
    if self.lock_all:
4680
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
4681
    else:
4682
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
4683

    
4684
    if self.lock_instances:
4685
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
4686

    
4687
  def DeclareLocks(self, level):
4688
    # If we have locked all instances then, before waiting to lock nodes,
    # release all the ones living on nodes unrelated to the current
    # operation.
4690
    if level == locking.LEVEL_NODE and self.lock_instances:
4691
      self.affected_instances = []
4692
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4693
        instances_keep = []
4694

    
4695
        # Build list of instances to release
4696
        for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
4697
          instance = self.context.cfg.GetInstanceInfo(instance_name)
4698
          if (instance.disk_template in constants.DTS_INT_MIRROR and
4699
              self.op.node_name in instance.all_nodes):
4700
            instances_keep.append(instance_name)
4701
            self.affected_instances.append(instance)
4702

    
4703
        _ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
4704

    
4705
        assert (set(self.glm.list_owned(locking.LEVEL_INSTANCE)) ==
4706
                set(instances_keep))
4707

    
4708
  def BuildHooksEnv(self):
4709
    """Build hooks env.
4710

4711
    This runs on the master node.
4712

4713
    """
4714
    return {
4715
      "OP_TARGET": self.op.node_name,
4716
      "MASTER_CANDIDATE": str(self.op.master_candidate),
4717
      "OFFLINE": str(self.op.offline),
4718
      "DRAINED": str(self.op.drained),
4719
      "MASTER_CAPABLE": str(self.op.master_capable),
4720
      "VM_CAPABLE": str(self.op.vm_capable),
4721
      }
4722

    
4723
  def BuildHooksNodes(self):
4724
    """Build hooks nodes.
4725

4726
    """
4727
    nl = [self.cfg.GetMasterNode(), self.op.node_name]
4728
    return (nl, nl)
4729

    
4730
  def CheckPrereq(self):
4731
    """Check prerequisites.
4732

4733
    This only checks the instance list against the existing names.
4734

4735
    """
4736
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
4737

    
4738
    if (self.op.master_candidate is not None or
4739
        self.op.drained is not None or
4740
        self.op.offline is not None):
4741
      # we can't change the master's node flags
4742
      if self.op.node_name == self.cfg.GetMasterNode():
4743
        raise errors.OpPrereqError("The master role can be changed"
4744
                                   " only via master-failover",
4745
                                   errors.ECODE_INVAL)
4746

    
4747
    if self.op.master_candidate and not node.master_capable:
4748
      raise errors.OpPrereqError("Node %s is not master capable, cannot make"
4749
                                 " it a master candidate" % node.name,
4750
                                 errors.ECODE_STATE)
4751

    
4752
    if self.op.vm_capable == False:
4753
      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
4754
      if ipri or isec:
4755
        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
4756
                                   " the vm_capable flag" % node.name,
4757
                                   errors.ECODE_STATE)
4758

    
4759
    if node.master_candidate and self.might_demote and not self.lock_all:
4760
      assert not self.op.auto_promote, "auto_promote set but lock_all not"
4761
      # check if after removing the current node, we're missing master
4762
      # candidates
4763
      (mc_remaining, mc_should, _) = \
4764
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
4765
      if mc_remaining < mc_should:
4766
        raise errors.OpPrereqError("Not enough master candidates, please"
4767
                                   " pass auto promote option to allow"
4768
                                   " promotion", errors.ECODE_STATE)
4769

    
4770
    self.old_flags = old_flags = (node.master_candidate,
4771
                                  node.drained, node.offline)
4772
    assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
4773
    self.old_role = old_role = self._F2R[old_flags]
4774

    
4775
    # Check for ineffective changes
4776
    for attr in self._FLAGS:
4777
      if (getattr(self.op, attr) == False and getattr(node, attr) == False):
4778
        self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
4779
        setattr(self.op, attr, None)
4780

    
4781
    # Past this point, any flag change to False means a transition
4782
    # away from the respective state, as only real changes are kept
4783

    
4784
    # TODO: We might query the real power state if it supports OOB
4785
    if _SupportsOob(self.cfg, node):
4786
      if self.op.offline is False and not (node.powered or
4787
                                           self.op.powered == True):
4788
        raise errors.OpPrereqError(("Node %s needs to be turned on before its"
4789
                                    " offline status can be reset") %
4790
                                   self.op.node_name)
4791
    elif self.op.powered is not None:
4792
      raise errors.OpPrereqError(("Unable to change powered state for node %s"
4793
                                  " as it does not support out-of-band"
4794
                                  " handling") % self.op.node_name)
4795

    
4796
    # If we're being de-offlined/drained, promote ourselves to MC if needed
4797
    if (self.op.drained == False or self.op.offline == False or
4798
        (self.op.master_capable and not node.master_capable)):
4799
      if _DecideSelfPromotion(self):
4800
        self.op.master_candidate = True
4801
        self.LogInfo("Auto-promoting node to master candidate")
4802

    
4803
    # If we're no longer master capable, we'll demote ourselves from MC
4804
    if self.op.master_capable == False and node.master_candidate:
4805
      self.LogInfo("Demoting from master candidate")
4806
      self.op.master_candidate = False
4807

    
4808
    # Compute new role
4809
    assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
4810
    if self.op.master_candidate:
4811
      new_role = self._ROLE_CANDIDATE
4812
    elif self.op.drained:
4813
      new_role = self._ROLE_DRAINED
4814
    elif self.op.offline:
4815
      new_role = self._ROLE_OFFLINE
4816
    elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
4817
      # False is still in new flags, which means we're un-setting (the
4818
      # only) True flag
4819
      new_role = self._ROLE_REGULAR
4820
    else: # no new flags, nothing, keep old role
4821
      new_role = old_role
4822

    
4823
    self.new_role = new_role
4824

    
4825
    if old_role == self._ROLE_OFFLINE and new_role != old_role:
4826
      # Trying to transition out of offline status
4827
      result = self.rpc.call_version([node.name])[node.name]
4828
      if result.fail_msg:
4829
        raise errors.OpPrereqError("Node %s is being de-offlined but fails"
4830
                                   " to report its version: %s" %
4831
                                   (node.name, result.fail_msg),
4832
                                   errors.ECODE_STATE)
4833
      else:
4834
        self.LogWarning("Transitioning node from offline to online state"
4835
                        " without using re-add. Please make sure the node"
4836
                        " is healthy!")
4837

    
4838
    if self.op.secondary_ip:
4839
      # Ok even without locking, because this can't be changed by any LU
4840
      master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
4841
      master_singlehomed = master.secondary_ip == master.primary_ip
4842
      if master_singlehomed and self.op.secondary_ip:
4843
        raise errors.OpPrereqError("Cannot change the secondary ip on a single"
4844
                                   " homed cluster", errors.ECODE_INVAL)
4845

    
4846
      if node.offline:
4847
        if self.affected_instances:
4848
          raise errors.OpPrereqError("Cannot change secondary ip: offline"
4849
                                     " node has instances (%s) configured"
4850
                                     " to use it" % self.affected_instances)
4851
      else:
4852
        # On online nodes, check that no instances are running, and that
4853
        # the node has the new ip and we can reach it.
4854
        for instance in self.affected_instances:
4855
          _CheckInstanceDown(self, instance, "cannot change secondary ip")
4856

    
4857
        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
4858
        if master.name != node.name:
4859
          # check reachability from master secondary ip to new secondary ip
4860
          if not netutils.TcpPing(self.op.secondary_ip,
4861
                                  constants.DEFAULT_NODED_PORT,
4862
                                  source=master.secondary_ip):
4863
            raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
4864
                                       " based ping to node daemon port",
4865
                                       errors.ECODE_ENVIRON)
4866

    
4867
    if self.op.ndparams:
4868
      new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
4869
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
4870
      self.new_ndparams = new_ndparams
4871

    
4872
  def Exec(self, feedback_fn):
4873
    """Modifies a node.
4874

4875
    """
4876
    node = self.node
4877
    old_role = self.old_role
4878
    new_role = self.new_role
4879

    
4880
    result = []
4881

    
4882
    if self.op.ndparams:
4883
      node.ndparams = self.new_ndparams
4884

    
4885
    if self.op.powered is not None:
4886
      node.powered = self.op.powered
4887

    
4888
    for attr in ["master_capable", "vm_capable"]:
4889
      val = getattr(self.op, attr)
4890
      if val is not None:
4891
        setattr(node, attr, val)
4892
        result.append((attr, str(val)))
4893

    
4894
    if new_role != old_role:
4895
      # Tell the node to demote itself, if no longer MC and not offline
4896
      if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
4897
        msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
4898
        if msg:
4899
          self.LogWarning("Node failed to demote itself: %s", msg)
4900

    
4901
      new_flags = self._R2F[new_role]
4902
      for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
4903
        if of != nf:
4904
          result.append((desc, str(nf)))
4905
      (node.master_candidate, node.drained, node.offline) = new_flags
4906

    
4907
      # we locked all nodes, we adjust the CP before updating this node
4908
      if self.lock_all:
4909
        _AdjustCandidatePool(self, [node.name])
4910

    
4911
    if self.op.secondary_ip:
4912
      node.secondary_ip = self.op.secondary_ip
4913
      result.append(("secondary_ip", self.op.secondary_ip))
4914

    
4915
    # this will trigger configuration file update, if needed
4916
    self.cfg.Update(node, feedback_fn)
4917

    
4918
    # this will trigger job queue propagation or cleanup if the mc
4919
    # flag changed
4920
    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
4921
      self.context.ReaddNode(node)
4922

    
4923
    return result
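    # The list returned above holds (parameter, new value) string pairs, for
    # example (illustrative only):
    #   [("master_candidate", "False"), ("secondary_ip", "192.0.2.10")]
    # i.e. one entry per parameter that actually changed.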


class LUNodePowercycle(NoHooksLU):
4927
  """Powercycles a node.
4928

4929
  """
4930
  REQ_BGL = False
4931

    
4932
  def CheckArguments(self):
4933
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
4934
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
4935
      raise errors.OpPrereqError("The node is the master and the force"
4936
                                 " parameter was not set",
4937
                                 errors.ECODE_INVAL)
4938

    
4939
  def ExpandNames(self):
4940
    """Locking for PowercycleNode.
4941

4942
    This is a last-resort option and shouldn't block on other
4943
    jobs. Therefore, we grab no locks.
4944

4945
    """
4946
    self.needed_locks = {}
4947

    
4948
  def Exec(self, feedback_fn):
4949
    """Reboots a node.
4950

4951
    """
4952
    result = self.rpc.call_node_powercycle(self.op.node_name,
4953
                                           self.cfg.GetHypervisorType())
4954
    result.Raise("Failed to schedule the reboot")
4955
    return result.payload
4956

    
4957

    
4958
class LUClusterQuery(NoHooksLU):
4959
  """Query cluster configuration.
4960

4961
  """
4962
  REQ_BGL = False
4963

    
4964
  def ExpandNames(self):
4965
    self.needed_locks = {}
4966

    
4967
  def Exec(self, feedback_fn):
4968
    """Return cluster config.
4969

4970
    """
4971
    cluster = self.cfg.GetClusterInfo()
4972
    os_hvp = {}
4973

    
4974
    # Filter just for enabled hypervisors
4975
    for os_name, hv_dict in cluster.os_hvp.items():
4976
      os_hvp[os_name] = {}
4977
      for hv_name, hv_params in hv_dict.items():
4978
        if hv_name in cluster.enabled_hypervisors:
4979
          os_hvp[os_name][hv_name] = hv_params
4980

    
4981
    # Convert ip_family to ip_version
4982
    primary_ip_version = constants.IP4_VERSION
4983
    if cluster.primary_ip_family == netutils.IP6Address.family:
4984
      primary_ip_version = constants.IP6_VERSION
4985

    
4986
    result = {
4987
      "software_version": constants.RELEASE_VERSION,
4988
      "protocol_version": constants.PROTOCOL_VERSION,
4989
      "config_version": constants.CONFIG_VERSION,
4990
      "os_api_version": max(constants.OS_API_VERSIONS),
4991
      "export_version": constants.EXPORT_VERSION,
4992
      "architecture": (platform.architecture()[0], platform.machine()),
4993
      "name": cluster.cluster_name,
4994
      "master": cluster.master_node,
4995
      "default_hypervisor": cluster.enabled_hypervisors[0],
4996
      "enabled_hypervisors": cluster.enabled_hypervisors,
4997
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
4998
                        for hypervisor_name in cluster.enabled_hypervisors]),
4999
      "os_hvp": os_hvp,
5000
      "beparams": cluster.beparams,
5001
      "osparams": cluster.osparams,
5002
      "nicparams": cluster.nicparams,
5003
      "ndparams": cluster.ndparams,
5004
      "candidate_pool_size": cluster.candidate_pool_size,
5005
      "master_netdev": cluster.master_netdev,
5006
      "volume_group_name": cluster.volume_group_name,
5007
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
5008
      "file_storage_dir": cluster.file_storage_dir,
5009
      "shared_file_storage_dir": cluster.shared_file_storage_dir,
5010
      "maintain_node_health": cluster.maintain_node_health,
5011
      "ctime": cluster.ctime,
5012
      "mtime": cluster.mtime,
5013
      "uuid": cluster.uuid,
5014
      "tags": list(cluster.GetTags()),
5015
      "uid_pool": cluster.uid_pool,
5016
      "default_iallocator": cluster.default_iallocator,
5017
      "reserved_lvs": cluster.reserved_lvs,
5018
      "primary_ip_version": primary_ip_version,
5019
      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
5020
      "hidden_os": cluster.hidden_os,
5021
      "blacklisted_os": cluster.blacklisted_os,
5022
      }
5023

    
5024
    return result
5025

    
5026

    
5027
class LUClusterConfigQuery(NoHooksLU):
5028
  """Return configuration values.
5029

5030
  """
5031
  REQ_BGL = False
5032
  _FIELDS_DYNAMIC = utils.FieldSet()
5033
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
5034
                                  "watcher_pause", "volume_group_name")
5035

    
5036
  def CheckArguments(self):
5037
    _CheckOutputFields(static=self._FIELDS_STATIC,
5038
                       dynamic=self._FIELDS_DYNAMIC,
5039
                       selected=self.op.output_fields)
5040

    
5041
  def ExpandNames(self):
5042
    self.needed_locks = {}
5043

    
5044
  def Exec(self, feedback_fn):
5045
    """Dump a representation of the cluster config to the standard output.
5046

5047
    """
5048
    values = []
5049
    for field in self.op.output_fields:
5050
      if field == "cluster_name":
5051
        entry = self.cfg.GetClusterName()
5052
      elif field == "master_node":
5053
        entry = self.cfg.GetMasterNode()
5054
      elif field == "drain_flag":
5055
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
5056
      elif field == "watcher_pause":
5057
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
5058
      elif field == "volume_group_name":
5059
        entry = self.cfg.GetVGName()
5060
      else:
5061
        raise errors.ParameterError(field)
5062
      values.append(entry)
5063
    return values
5064

    
5065

    
5066
class LUInstanceActivateDisks(NoHooksLU):
5067
  """Bring up an instance's disks.
5068

5069
  """
5070
  REQ_BGL = False
5071

    
5072
  def ExpandNames(self):
5073
    self._ExpandAndLockInstance()
5074
    self.needed_locks[locking.LEVEL_NODE] = []
5075
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5076

    
5077
  def DeclareLocks(self, level):
5078
    if level == locking.LEVEL_NODE:
5079
      self._LockInstancesNodes()
5080

    
5081
  def CheckPrereq(self):
5082
    """Check prerequisites.
5083

5084
    This checks that the instance is in the cluster.
5085

5086
    """
5087
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5088
    assert self.instance is not None, \
5089
      "Cannot retrieve locked instance %s" % self.op.instance_name
5090
    _CheckNodeOnline(self, self.instance.primary_node)
5091

    
5092
  def Exec(self, feedback_fn):
5093
    """Activate the disks.
5094

5095
    """
5096
    disks_ok, disks_info = \
5097
              _AssembleInstanceDisks(self, self.instance,
5098
                                     ignore_size=self.op.ignore_size)
5099
    if not disks_ok:
5100
      raise errors.OpExecError("Cannot activate block devices")
5101

    
5102
    return disks_info
5103

    
5104

    
5105
def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
5106
                           ignore_size=False):
5107
  """Prepare the block devices for an instance.
5108

5109
  This sets up the block devices on all nodes.
5110

5111
  @type lu: L{LogicalUnit}
5112
  @param lu: the logical unit on whose behalf we execute
5113
  @type instance: L{objects.Instance}
5114
  @param instance: the instance for whose disks we assemble
5115
  @type disks: list of L{objects.Disk} or None
5116
  @param disks: which disks to assemble (or all, if None)
5117
  @type ignore_secondaries: boolean
5118
  @param ignore_secondaries: if true, errors on secondary nodes
5119
      won't result in an error return from the function
5120
  @type ignore_size: boolean
5121
  @param ignore_size: if true, the current known size of the disk
5122
      will not be used during the disk activation, useful for cases
5123
      when the size is wrong
5124
  @return: False if the operation failed, otherwise a list of
5125
      (host, instance_visible_name, node_visible_name)
5126
      with the mapping from node devices to instance devices
5127

5128
  """
5129
  device_info = []
5130
  disks_ok = True
5131
  iname = instance.name
5132
  disks = _ExpandCheckDisks(instance, disks)
5133

    
5134
  # With the two passes mechanism we try to reduce the window of
5135
  # opportunity for the race condition of switching DRBD to primary
5136
  # before handshaking occurred, but we do not eliminate it
5137

    
5138
  # The proper fix would be to wait (with some limits) until the
5139
  # connection has been made and drbd transitions from WFConnection
5140
  # into any other network-connected state (Connected, SyncTarget,
5141
  # SyncSource, etc.)
5142

    
5143
  # 1st pass, assemble on all nodes in secondary mode
5144
  for idx, inst_disk in enumerate(disks):
5145
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
5146
      if ignore_size:
5147
        node_disk = node_disk.Copy()
5148
        node_disk.UnsetSize()
5149
      lu.cfg.SetDiskID(node_disk, node)
5150
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
5151
      msg = result.fail_msg
5152
      if msg:
5153
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
5154
                           " (is_primary=False, pass=1): %s",
5155
                           inst_disk.iv_name, node, msg)
5156
        if not ignore_secondaries:
5157
          disks_ok = False
5158

    
5159
  # FIXME: race condition on drbd migration to primary
5160

    
5161
  # 2nd pass, do only the primary node
5162
  for idx, inst_disk in enumerate(disks):
5163
    dev_path = None
5164

    
5165
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
5166
      if node != instance.primary_node:
5167
        continue
5168
      if ignore_size:
5169
        node_disk = node_disk.Copy()
5170
        node_disk.UnsetSize()
5171
      lu.cfg.SetDiskID(node_disk, node)
5172
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
5173
      msg = result.fail_msg
5174
      if msg:
5175
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
5176
                           " (is_primary=True, pass=2): %s",
5177
                           inst_disk.iv_name, node, msg)
5178
        disks_ok = False
5179
      else:
5180
        dev_path = result.payload
5181

    
5182
    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
5183

    
5184
  # leave the disks configured for the primary node
5185
  # this is a workaround that would be fixed better by
5186
  # improving the logical/physical id handling
5187
  for disk in disks:
5188
    lu.cfg.SetDiskID(disk, instance.primary_node)
5189

    
5190
  return disks_ok, device_info
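  # A minimal usage sketch for the function above (hypothetical caller code,
  # modelled on the callers in this module): the boolean is checked first and
  # the (node, iv_name, device_path) tuples are then used for feedback:
  #   disks_ok, dev_info = _AssembleInstanceDisks(self, instance)
  #   if not disks_ok:
  #     raise errors.OpExecError("Cannot activate block devices")
  #   for node, iv_name, dev_path in dev_info:
  #     feedback_fn("%s: disk %s assembled as %s" % (node, iv_name, dev_path))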


def _StartInstanceDisks(lu, instance, force):
5194
  """Start the disks of an instance.
5195

5196
  """
5197
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
5198
                                           ignore_secondaries=force)
5199
  if not disks_ok:
5200
    _ShutdownInstanceDisks(lu, instance)
5201
    if force is not None and not force:
5202
      lu.proc.LogWarning("", hint="If the message above refers to a"
5203
                         " secondary node,"
5204
                         " you can retry the operation using '--force'.")
5205
    raise errors.OpExecError("Disk consistency error")
5206

    
5207

    
5208
class LUInstanceDeactivateDisks(NoHooksLU):
5209
  """Shutdown an instance's disks.
5210

5211
  """
5212
  REQ_BGL = False
5213

    
5214
  def ExpandNames(self):
5215
    self._ExpandAndLockInstance()
5216
    self.needed_locks[locking.LEVEL_NODE] = []
5217
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5218

    
5219
  def DeclareLocks(self, level):
5220
    if level == locking.LEVEL_NODE:
5221
      self._LockInstancesNodes()
5222

    
5223
  def CheckPrereq(self):
5224
    """Check prerequisites.
5225

5226
    This checks that the instance is in the cluster.
5227

5228
    """
5229
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5230
    assert self.instance is not None, \
5231
      "Cannot retrieve locked instance %s" % self.op.instance_name
5232

    
5233
  def Exec(self, feedback_fn):
5234
    """Deactivate the disks
5235

5236
    """
5237
    instance = self.instance
5238
    if self.op.force:
5239
      _ShutdownInstanceDisks(self, instance)
5240
    else:
5241
      _SafeShutdownInstanceDisks(self, instance)
5242

    
5243

    
5244
def _SafeShutdownInstanceDisks(lu, instance, disks=None):
5245
  """Shutdown block devices of an instance.
5246

5247
  This function checks if an instance is running, before calling
5248
  _ShutdownInstanceDisks.
5249

5250
  """
5251
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
5252
  _ShutdownInstanceDisks(lu, instance, disks=disks)
5253

    
5254

    
5255
def _ExpandCheckDisks(instance, disks):
5256
  """Return the instance disks selected by the disks list
5257

5258
  @type disks: list of L{objects.Disk} or None
5259
  @param disks: selected disks
5260
  @rtype: list of L{objects.Disk}
5261
  @return: selected instance disks to act on
5262

5263
  """
5264
  if disks is None:
5265
    return instance.disks
5266
  else:
5267
    if not set(disks).issubset(instance.disks):
5268
      raise errors.ProgrammerError("Can only act on disks belonging to the"
5269
                                   " target instance")
5270
    return disks
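  # Sketch of the behaviour above (hypothetical calls): passing None selects
  # every disk of the instance, a subset is returned unchanged, and anything
  # not belonging to the instance raises errors.ProgrammerError:
  #   _ExpandCheckDisks(instance, None)            # -> instance.disks
  #   _ExpandCheckDisks(instance, instance.disks)  # -> the same list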


def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
5274
  """Shutdown block devices of an instance.
5275

5276
  This does the shutdown on all nodes of the instance.
5277

5278
  If ignore_primary is false, errors on the primary node are not
  ignored (they make this function report failure).
5280

5281
  """
5282
  all_result = True
5283
  disks = _ExpandCheckDisks(instance, disks)
5284

    
5285
  for disk in disks:
5286
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
5287
      lu.cfg.SetDiskID(top_disk, node)
5288
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
5289
      msg = result.fail_msg
5290
      if msg:
5291
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
5292
                      disk.iv_name, node, msg)
5293
        if ((node == instance.primary_node and not ignore_primary) or
5294
            (node != instance.primary_node and not result.offline)):
5295
          all_result = False
5296
  return all_result
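  # Usage sketch (hypothetical calls): the strict form is the default, while
  # cleanup paths that no longer care about errors on the primary node can
  # pass ignore_primary=True:
  #   _ShutdownInstanceDisks(lu, instance)                       # strict
  #   _ShutdownInstanceDisks(lu, instance, ignore_primary=True)  # best effort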


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
5300
  """Checks if a node has enough free memory.
5301

5302
  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.
5306

5307
  @type lu: C{LogicalUnit}
5308
  @param lu: a logical unit from which we get configuration data
5309
  @type node: C{str}
5310
  @param node: the node to check
5311
  @type reason: C{str}
5312
  @param reason: string to use in the error message
5313
  @type requested: C{int}
5314
  @param requested: the amount of memory in MiB to check for
5315
  @type hypervisor_name: C{str}
5316
  @param hypervisor_name: the hypervisor to ask for memory stats
5317
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
5318
      we cannot check the node
5319

5320
  """
5321
  nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
5322
  nodeinfo[node].Raise("Can't get data from node %s" % node,
5323
                       prereq=True, ecode=errors.ECODE_ENVIRON)
5324
  free_mem = nodeinfo[node].payload.get('memory_free', None)
5325
  if not isinstance(free_mem, int):
5326
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
5327
                               " was '%s'" % (node, free_mem),
5328
                               errors.ECODE_ENVIRON)
5329
  if requested > free_mem:
5330
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
5331
                               " needed %s MiB, available %s MiB" %
5332
                               (node, reason, requested, free_mem),
5333
                               errors.ECODE_NORES)
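  # Usage sketch, mirroring the instance startup path later in this module:
  # before starting an instance, its primary node is checked for enough free
  # memory to hold the instance's configured amount:
  #   _CheckNodeFreeMemory(self, instance.primary_node,
  #                        "starting instance %s" % instance.name,
  #                        bep[constants.BE_MEMORY], instance.hypervisor)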


def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
5337
  """Checks if nodes have enough free disk space in the all VGs.
5338

5339
  This function check if all given nodes have the needed amount of
5340
  free disk. In case any node has less disk or we cannot get the
5341
  information from the node, this function raise an OpPrereqError
5342
  exception.
5343

5344
  @type lu: C{LogicalUnit}
5345
  @param lu: a logical unit from which we get configuration data
5346
  @type nodenames: C{list}
5347
  @param nodenames: the list of node names to check
5348
  @type req_sizes: C{dict}
5349
  @param req_sizes: the hash of vg and corresponding amount of disk in
5350
      MiB to check for
5351
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
5352
      or we cannot check the node
5353

5354
  """
5355
  for vg, req_size in req_sizes.items():
5356
    _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
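  # Shape of req_sizes (illustrative values only): a dict mapping each volume
  # group name to the total amount of disk required on it, in MiB, e.g.
  #   {"xenvg": 20480}
  # An empty dict means nothing is checked.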


def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
5360
  """Checks if nodes have enough free disk space in the specified VG.
5361

5362
  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.
5366

5367
  @type lu: C{LogicalUnit}
5368
  @param lu: a logical unit from which we get configuration data
5369
  @type nodenames: C{list}
5370
  @param nodenames: the list of node names to check
5371
  @type vg: C{str}
5372
  @param vg: the volume group to check
5373
  @type requested: C{int}
5374
  @param requested: the amount of disk in MiB to check for
5375
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
5376
      or we cannot check the node
5377

5378
  """
5379
  nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
5380
  for node in nodenames:
5381
    info = nodeinfo[node]
5382
    info.Raise("Cannot get current information from node %s" % node,
5383
               prereq=True, ecode=errors.ECODE_ENVIRON)
5384
    vg_free = info.payload.get("vg_free", None)
5385
    if not isinstance(vg_free, int):
5386
      raise errors.OpPrereqError("Can't compute free disk space on node"
5387
                                 " %s for vg %s, result was '%s'" %
5388
                                 (node, vg, vg_free), errors.ECODE_ENVIRON)
5389
    if requested > vg_free:
5390
      raise errors.OpPrereqError("Not enough disk space on target node %s"
5391
                                 " vg %s: required %d MiB, available %d MiB" %
5392
                                 (node, vg, requested, vg_free),
5393
                                 errors.ECODE_NORES)
5394

    
5395

    
5396
class LUInstanceStartup(LogicalUnit):
5397
  """Starts an instance.
5398

5399
  """
5400
  HPATH = "instance-start"
5401
  HTYPE = constants.HTYPE_INSTANCE
5402
  REQ_BGL = False
5403

    
5404
  def CheckArguments(self):
5405
    # extra beparams
5406
    if self.op.beparams:
5407
      # fill the beparams dict
5408
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
5409

    
5410
  def ExpandNames(self):
5411
    self._ExpandAndLockInstance()
5412

    
5413
  def BuildHooksEnv(self):
5414
    """Build hooks env.
5415

5416
    This runs on master, primary and secondary nodes of the instance.
5417

5418
    """
5419
    env = {
5420
      "FORCE": self.op.force,
5421
      }
5422

    
5423
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5424

    
5425
    return env
5426

    
5427
  def BuildHooksNodes(self):
5428
    """Build hooks nodes.
5429

5430
    """
5431
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5432
    return (nl, nl)
5433

    
5434
  def CheckPrereq(self):
5435
    """Check prerequisites.
5436

5437
    This checks that the instance is in the cluster.
5438

5439
    """
5440
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5441
    assert self.instance is not None, \
5442
      "Cannot retrieve locked instance %s" % self.op.instance_name
5443

    
5444
    # extra hvparams
5445
    if self.op.hvparams:
5446
      # check hypervisor parameter syntax (locally)
5447
      cluster = self.cfg.GetClusterInfo()
5448
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
5449
      filled_hvp = cluster.FillHV(instance)
5450
      filled_hvp.update(self.op.hvparams)
5451
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
5452
      hv_type.CheckParameterSyntax(filled_hvp)
5453
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
5454

    
5455
    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
5456

    
5457
    if self.primary_offline and self.op.ignore_offline_nodes:
5458
      self.proc.LogWarning("Ignoring offline primary node")
5459

    
5460
      if self.op.hvparams or self.op.beparams:
5461
        self.proc.LogWarning("Overridden parameters are ignored")
5462
    else:
5463
      _CheckNodeOnline(self, instance.primary_node)
5464

    
5465
      bep = self.cfg.GetClusterInfo().FillBE(instance)
5466

    
5467
      # check bridges existence
5468
      _CheckInstanceBridgesExist(self, instance)
5469

    
5470
      remote_info = self.rpc.call_instance_info(instance.primary_node,
5471
                                                instance.name,
5472
                                                instance.hypervisor)
5473
      remote_info.Raise("Error checking node %s" % instance.primary_node,
5474
                        prereq=True, ecode=errors.ECODE_ENVIRON)
5475
      if not remote_info.payload: # not running already
5476
        _CheckNodeFreeMemory(self, instance.primary_node,
5477
                             "starting instance %s" % instance.name,
5478
                             bep[constants.BE_MEMORY], instance.hypervisor)
5479

    
5480
  def Exec(self, feedback_fn):
5481
    """Start the instance.
5482

5483
    """
5484
    instance = self.instance
5485
    force = self.op.force
5486

    
5487
    if not self.op.no_remember:
5488
      self.cfg.MarkInstanceUp(instance.name)
5489

    
5490
    if self.primary_offline:
5491
      assert self.op.ignore_offline_nodes
5492
      self.proc.LogInfo("Primary node offline, marked instance as started")
5493
    else:
5494
      node_current = instance.primary_node
5495

    
5496
      _StartInstanceDisks(self, instance, force)
5497

    
5498
      result = self.rpc.call_instance_start(node_current, instance,
5499
                                            self.op.hvparams, self.op.beparams)
5500
      msg = result.fail_msg
5501
      if msg:
5502
        _ShutdownInstanceDisks(self, instance)
5503
        raise errors.OpExecError("Could not start instance: %s" % msg)
5504

    
5505

    
5506
class LUInstanceReboot(LogicalUnit):
5507
  """Reboot an instance.
5508

5509
  """
5510
  HPATH = "instance-reboot"
5511
  HTYPE = constants.HTYPE_INSTANCE
5512
  REQ_BGL = False
5513

    
5514
  def ExpandNames(self):
5515
    self._ExpandAndLockInstance()
5516

    
5517
  def BuildHooksEnv(self):
5518
    """Build hooks env.
5519

5520
    This runs on master, primary and secondary nodes of the instance.
5521

5522
    """
5523
    env = {
5524
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
5525
      "REBOOT_TYPE": self.op.reboot_type,
5526
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
5527
      }
5528

    
5529
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5530

    
5531
    return env
5532

    
5533
  def BuildHooksNodes(self):
5534
    """Build hooks nodes.
5535

5536
    """
5537
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5538
    return (nl, nl)
5539

    
5540
  def CheckPrereq(self):
5541
    """Check prerequisites.
5542

5543
    This checks that the instance is in the cluster.
5544

5545
    """
5546
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5547
    assert self.instance is not None, \
5548
      "Cannot retrieve locked instance %s" % self.op.instance_name
5549

    
5550
    _CheckNodeOnline(self, instance.primary_node)
5551

    
5552
    # check bridges existence
5553
    _CheckInstanceBridgesExist(self, instance)
5554

    
5555
  def Exec(self, feedback_fn):
5556
    """Reboot the instance.
5557

5558
    """
5559
    instance = self.instance
5560
    ignore_secondaries = self.op.ignore_secondaries
5561
    reboot_type = self.op.reboot_type
5562

    
5563
    remote_info = self.rpc.call_instance_info(instance.primary_node,
5564
                                              instance.name,
5565
                                              instance.hypervisor)
5566
    remote_info.Raise("Error checking node %s" % instance.primary_node)
5567
    instance_running = bool(remote_info.payload)
5568

    
5569
    node_current = instance.primary_node
5570

    
5571
    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
5572
                                            constants.INSTANCE_REBOOT_HARD]:
5573
      for disk in instance.disks:
5574
        self.cfg.SetDiskID(disk, node_current)
5575
      result = self.rpc.call_instance_reboot(node_current, instance,
5576
                                             reboot_type,
5577
                                             self.op.shutdown_timeout)
5578
      result.Raise("Could not reboot instance")
5579
    else:
5580
      if instance_running:
5581
        result = self.rpc.call_instance_shutdown(node_current, instance,
5582
                                                 self.op.shutdown_timeout)
5583
        result.Raise("Could not shutdown instance for full reboot")
5584
        _ShutdownInstanceDisks(self, instance)
5585
      else:
5586
        self.LogInfo("Instance %s was already stopped, starting now",
5587
                     instance.name)
5588
      _StartInstanceDisks(self, instance, ignore_secondaries)
5589
      result = self.rpc.call_instance_start(node_current, instance, None, None)
5590
      msg = result.fail_msg
5591
      if msg:
5592
        _ShutdownInstanceDisks(self, instance)
5593
        raise errors.OpExecError("Could not start instance for"
5594
                                 " full reboot: %s" % msg)
5595

    
5596
    self.cfg.MarkInstanceUp(instance.name)
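    # Summary of the branches above: soft/hard reboots of a running instance
    # are delegated to the hypervisor via call_instance_reboot; any other
    # case (a full reboot, or a stopped instance) is emulated as an optional
    # shutdown followed by disk (re)activation and a fresh instance start.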


class LUInstanceShutdown(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["TIMEOUT"] = self.op.timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.primary_offline = \
      self.cfg.GetNodeInfo(self.instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.proc.LogWarning("Ignoring offline primary node")
    else:
      _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    timeout = self.op.timeout
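
    # Unless explicitly asked not to remember the state change, mark the
    # instance as stopped in the configuration first; this also keeps the
    # watcher from restarting it if the shutdown RPC below fails.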
    if not self.op.no_remember:
      self.cfg.MarkInstanceDown(instance.name)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.proc.LogInfo("Primary node offline, marked instance as stopped")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
      msg = result.fail_msg
      if msg:
        self.proc.LogWarning("Could not shutdown instance: %s" % msg)

      _ShutdownInstanceDisks(self, instance)


class LUInstanceReinstall(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    return _BuildInstanceHookEnvByObject(self, self.instance)

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
                     " offline, cannot reinstall")
    for node in instance.secondary_nodes:
      _CheckNodeOnline(self, node, "Instance secondary node offline,"
                       " cannot reinstall")

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name,
                                 errors.ECODE_INVAL)
    _CheckInstanceDown(self, instance, "cannot reinstall")

    if self.op.os_type is not None:
      # OS verification
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
      instance_os = self.op.os_type
    else:
      instance_os = instance.os

    nodelist = list(instance.all_nodes)

    if self.op.osparams:
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = None

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      # Write to configuration
      self.cfg.Update(inst, feedback_fn)
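
    # The OS create scripts need access to the instance's disks, so activate
    # them on the primary node for the duration of the reinstall and shut
    # them down again afterwards.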
    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      # FIXME: pass debug option from opcode to backend
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
                                             self.op.debug_level,
                                             osparams=self.os_inst)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LUInstanceRecreateDisks(LogicalUnit):
5760
  """Recreate an instance's missing disks.
5761

5762
  """
5763
  HPATH = "instance-recreate-disks"
5764
  HTYPE = constants.HTYPE_INSTANCE
5765
  REQ_BGL = False
5766

    
5767
  def CheckArguments(self):
5768
    # normalise the disk list
5769
    self.op.disks = sorted(frozenset(self.op.disks))
5770

    
5771
  def ExpandNames(self):
5772
    self._ExpandAndLockInstance()
5773
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5774
    if self.op.nodes:
5775
      self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
5776
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
5777
    else:
5778
      self.needed_locks[locking.LEVEL_NODE] = []
5779

    
5780
  def DeclareLocks(self, level):
5781
    if level == locking.LEVEL_NODE:
5782
      # if we replace the nodes, we only need to lock the old primary,
5783
      # otherwise we need to lock all nodes for disk re-creation
5784
      primary_only = bool(self.op.nodes)
5785
      self._LockInstancesNodes(primary_only=primary_only)
5786

    
5787
  def BuildHooksEnv(self):
5788
    """Build hooks env.
5789

5790
    This runs on master, primary and secondary nodes of the instance.
5791

5792
    """
5793
    return _BuildInstanceHookEnvByObject(self, self.instance)
5794

    
5795
  def BuildHooksNodes(self):
5796
    """Build hooks nodes.
5797

5798
    """
5799
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5800
    return (nl, nl)
5801

    
5802
  def CheckPrereq(self):
5803
    """Check prerequisites.
5804

5805
    This checks that the instance is in the cluster and is not running.
5806

5807
    """
5808
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5809
    assert instance is not None, \
5810
      "Cannot retrieve locked instance %s" % self.op.instance_name
5811
    if self.op.nodes:
5812
      if len(self.op.nodes) != len(instance.all_nodes):
5813
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
5814
                                   " %d replacement nodes were specified" %
5815
                                   (instance.name, len(instance.all_nodes),
5816
                                    len(self.op.nodes)),
5817
                                   errors.ECODE_INVAL)
5818
      assert instance.disk_template != constants.DT_DRBD8 or \
5819
          len(self.op.nodes) == 2
5820
      assert instance.disk_template != constants.DT_PLAIN or \
5821
          len(self.op.nodes) == 1
5822
      primary_node = self.op.nodes[0]
5823
    else:
5824
      primary_node = instance.primary_node
5825
    _CheckNodeOnline(self, primary_node)
5826

    
5827
    if instance.disk_template == constants.DT_DISKLESS:
5828
      raise errors.OpPrereqError("Instance '%s' has no disks" %
5829
                                 self.op.instance_name, errors.ECODE_INVAL)
5830
    # if we replace nodes *and* the old primary is offline, we don't
5831
    # check
5832
    assert instance.primary_node in self.needed_locks[locking.LEVEL_NODE]
5833
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
5834
    if not (self.op.nodes and old_pnode.offline):
5835
      _CheckInstanceDown(self, instance, "cannot recreate disks")
5836

    
5837
    if not self.op.disks:
5838
      self.op.disks = range(len(instance.disks))
5839
    else:
5840
      for idx in self.op.disks:
5841
        if idx >= len(instance.disks):
5842
          raise errors.OpPrereqError("Invalid disk index '%s'" % idx,
5843
                                     errors.ECODE_INVAL)
5844
    if self.op.disks != range(len(instance.disks)) and self.op.nodes:
5845
      raise errors.OpPrereqError("Can't recreate disks partially and"
5846
                                 " change the nodes at the same time",
5847
                                 errors.ECODE_INVAL)
5848
    self.instance = instance
5849

    
5850
  def Exec(self, feedback_fn):
5851
    """Recreate the disks.
5852

5853
    """
5854
    # change primary node, if needed
5855
    if self.op.nodes:
5856
      self.instance.primary_node = self.op.nodes[0]
5857
      self.LogWarning("Changing the instance's nodes, you will have to"
5858
                      " remove any disks left on the older nodes manually")
5859

    
5860
    to_skip = []
5861
    for idx, disk in enumerate(self.instance.disks):
5862
      if idx not in self.op.disks: # disk idx has not been passed in
5863
        to_skip.append(idx)
5864
        continue
5865
      # update secondaries for disks, if needed
5866
      if self.op.nodes:
5867
        if disk.dev_type == constants.LD_DRBD8:
5868
          # need to update the nodes
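          # (for DRBD8 the first two elements of logical_id are the names
          # of the two nodes holding the device)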
          assert len(self.op.nodes) == 2
          logical_id = list(disk.logical_id)
          logical_id[0] = self.op.nodes[0]
          logical_id[1] = self.op.nodes[1]
          disk.logical_id = tuple(logical_id)
5874

    
5875
    if self.op.nodes:
5876
      self.cfg.Update(self.instance, feedback_fn)
5877

    
5878
    _CreateDisks(self, self.instance, to_skip=to_skip)
5879

    
5880

    
5881
class LUInstanceRename(LogicalUnit):
5882
  """Rename an instance.
5883

5884
  """
5885
  HPATH = "instance-rename"
5886
  HTYPE = constants.HTYPE_INSTANCE
5887

    
5888
  def CheckArguments(self):
5889
    """Check arguments.
5890

5891
    """
5892
    if self.op.ip_check and not self.op.name_check:
5893
      # TODO: make the ip check more flexible and not depend on the name check
5894
      raise errors.OpPrereqError("IP address check requires a name check",
5895
                                 errors.ECODE_INVAL)
5896

    
5897
  def BuildHooksEnv(self):
5898
    """Build hooks env.
5899

5900
    This runs on master, primary and secondary nodes of the instance.
5901

5902
    """
5903
    env = _BuildInstanceHookEnvByObject(self, self.instance)
5904
    env["INSTANCE_NEW_NAME"] = self.op.new_name
5905
    return env
5906

    
5907
  def BuildHooksNodes(self):
5908
    """Build hooks nodes.
5909

5910
    """
5911
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5912
    return (nl, nl)
5913

    
5914
  def CheckPrereq(self):
5915
    """Check prerequisites.
5916

5917
    This checks that the instance is in the cluster and is not running.
5918

5919
    """
5920
    self.op.instance_name = _ExpandInstanceName(self.cfg,
5921
                                                self.op.instance_name)
5922
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5923
    assert instance is not None
5924
    _CheckNodeOnline(self, instance.primary_node)
5925
    _CheckInstanceDown(self, instance, "cannot rename")
5926
    self.instance = instance
5927

    
5928
    new_name = self.op.new_name
5929
    if self.op.name_check:
5930
      hostname = netutils.GetHostname(name=new_name)
5931
      if hostname != new_name:
5932
        self.LogInfo("Resolved given name '%s' to '%s'", new_name,
5933
                     hostname.name)
5934
      if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
5935
        raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
5936
                                    " same as given hostname '%s'") %
5937
                                    (hostname.name, self.op.new_name),
5938
                                    errors.ECODE_INVAL)
5939
      new_name = self.op.new_name = hostname.name
5940
      if (self.op.ip_check and
5941
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
5942
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
5943
                                   (hostname.ip, new_name),
5944
                                   errors.ECODE_NOTUNIQUE)
5945

    
5946
    instance_list = self.cfg.GetInstanceList()
5947
    if new_name in instance_list and new_name != instance.name:
5948
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
5949
                                 new_name, errors.ECODE_EXISTS)
5950

    
5951
  def Exec(self, feedback_fn):
5952
    """Rename the instance.
5953

5954
    """
5955
    inst = self.instance
5956
    old_name = inst.name
5957

    
5958
    rename_file_storage = False
5959
    if (inst.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE) and
5960
        self.op.new_name != inst.name):
5961
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5962
      rename_file_storage = True
5963

    
5964
    self.cfg.RenameInstance(inst.name, self.op.new_name)
5965
    # Change the instance lock. This is definitely safe while we hold the BGL.
5966
    # Otherwise the new lock would have to be added in acquired mode.
5967
    assert self.REQ_BGL
5968
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
5969
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
5970

    
5971
    # re-read the instance from the configuration after rename
5972
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
5973

    
5974
    if rename_file_storage:
5975
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
5976
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
5977
                                                     old_file_storage_dir,
5978
                                                     new_file_storage_dir)
5979
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
5980
                   " (but the instance has been renamed in Ganeti)" %
5981
                   (inst.primary_node, old_file_storage_dir,
5982
                    new_file_storage_dir))
5983

    
5984
    _StartInstanceDisks(self, inst, None)
5985
    try:
5986
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
5987
                                                 old_name, self.op.debug_level)
5988
      msg = result.fail_msg
5989
      if msg:
5990
        msg = ("Could not run OS rename script for instance %s on node %s"
5991
               " (but the instance has been renamed in Ganeti): %s" %
5992
               (inst.name, inst.primary_node, msg))
5993
        self.proc.LogWarning(msg)
5994
    finally:
5995
      _ShutdownInstanceDisks(self, inst)
5996

    
5997
    return inst.name
5998

    
5999

    
6000
class LUInstanceRemove(LogicalUnit):
6001
  """Remove an instance.
6002

6003
  """
6004
  HPATH = "instance-remove"
6005
  HTYPE = constants.HTYPE_INSTANCE
6006
  REQ_BGL = False
6007

    
6008
  def ExpandNames(self):
6009
    self._ExpandAndLockInstance()
6010
    self.needed_locks[locking.LEVEL_NODE] = []
6011
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6012

    
6013
  def DeclareLocks(self, level):
6014
    if level == locking.LEVEL_NODE:
6015
      self._LockInstancesNodes()
6016

    
6017
  def BuildHooksEnv(self):
6018
    """Build hooks env.
6019

6020
    This runs on master, primary and secondary nodes of the instance.
6021

6022
    """
6023
    env = _BuildInstanceHookEnvByObject(self, self.instance)
6024
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
6025
    return env
6026

    
6027
  def BuildHooksNodes(self):
6028
    """Build hooks nodes.
6029

6030
    """
6031
    nl = [self.cfg.GetMasterNode()]
6032
    nl_post = list(self.instance.all_nodes) + nl
6033
    return (nl, nl_post)
6034

    
6035
  def CheckPrereq(self):
6036
    """Check prerequisites.
6037

6038
    This checks that the instance is in the cluster.
6039

6040
    """
6041
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6042
    assert self.instance is not None, \
6043
      "Cannot retrieve locked instance %s" % self.op.instance_name
6044

    
6045
  def Exec(self, feedback_fn):
6046
    """Remove the instance.
6047

6048
    """
6049
    instance = self.instance
6050
    logging.info("Shutting down instance %s on node %s",
6051
                 instance.name, instance.primary_node)
6052

    
6053
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
6054
                                             self.op.shutdown_timeout)
6055
    msg = result.fail_msg
6056
    if msg:
6057
      if self.op.ignore_failures:
6058
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
6059
      else:
6060
        raise errors.OpExecError("Could not shutdown instance %s on"
6061
                                 " node %s: %s" %
6062
                                 (instance.name, instance.primary_node, msg))
6063

    
6064
    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
6065

    
6066

    
6067
def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
6068
  """Utility function to remove an instance.
6069

6070
  """
6071
  logging.info("Removing block devices for instance %s", instance.name)
6072

    
6073
  if not _RemoveDisks(lu, instance):
6074
    if not ignore_failures:
6075
      raise errors.OpExecError("Can't remove instance's disks")
6076
    feedback_fn("Warning: can't remove instance's disks")
6077

    
6078
  logging.info("Removing instance %s out of cluster config", instance.name)
6079

    
6080
  lu.cfg.RemoveInstance(instance.name)
6081

    
6082
  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
6083
    "Instance lock removal conflict"
6084

    
6085
  # Remove lock for the instance
6086
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
6087

    
6088

    
6089
class LUInstanceQuery(NoHooksLU):
6090
  """Logical unit for querying instances.
6091

6092
  """
6093
  # pylint: disable-msg=W0142
6094
  REQ_BGL = False
6095

    
6096
  def CheckArguments(self):
6097
    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
6098
                             self.op.output_fields, self.op.use_locking)
6099

    
6100
  def ExpandNames(self):
6101
    self.iq.ExpandNames(self)
6102

    
6103
  def DeclareLocks(self, level):
6104
    self.iq.DeclareLocks(self, level)
6105

    
6106
  def Exec(self, feedback_fn):
6107
    return self.iq.OldStyleQuery(self)
6108

    
6109

    
6110
class LUInstanceFailover(LogicalUnit):
6111
  """Failover an instance.
6112

6113
  """
6114
  HPATH = "instance-failover"
6115
  HTYPE = constants.HTYPE_INSTANCE
6116
  REQ_BGL = False
6117

    
6118
  def CheckArguments(self):
6119
    """Check the arguments.
6120

6121
    """
6122
    self.iallocator = getattr(self.op, "iallocator", None)
6123
    self.target_node = getattr(self.op, "target_node", None)
6124

    
6125
  def ExpandNames(self):
6126
    self._ExpandAndLockInstance()
6127

    
6128
    if self.op.target_node is not None:
6129
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
6130

    
6131
    self.needed_locks[locking.LEVEL_NODE] = []
6132
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6133

    
6134
    ignore_consistency = self.op.ignore_consistency
6135
    shutdown_timeout = self.op.shutdown_timeout
6136
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
6137
                                       cleanup=False,
6138
                                       failover=True,
6139
                                       ignore_consistency=ignore_consistency,
6140
                                       shutdown_timeout=shutdown_timeout)
6141
    self.tasklets = [self._migrater]
6142

    
6143
  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
      if instance.disk_template in constants.DTS_EXT_MIRROR:
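        # Instances with externally mirrored disks can be failed over to
        # any node; if no target node was given, the iallocator will pick
        # one later, so all node locks have to be acquired here.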
        if self.op.target_node is None:
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        else:
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                   self.op.target_node]
        del self.recalculate_locks[locking.LEVEL_NODE]
      else:
        self._LockInstancesNodes()

  def BuildHooksEnv(self):
6157
    """Build hooks env.
6158

6159
    This runs on master, primary and secondary nodes of the instance.
6160

6161
    """
6162
    instance = self._migrater.instance
6163
    source_node = instance.primary_node
6164
    target_node = self.op.target_node
6165
    env = {
6166
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
6167
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
6168
      "OLD_PRIMARY": source_node,
6169
      "NEW_PRIMARY": target_node,
6170
      }
6171

    
6172
    if instance.disk_template in constants.DTS_INT_MIRROR:
6173
      env["OLD_SECONDARY"] = instance.secondary_nodes[0]
6174
      env["NEW_SECONDARY"] = source_node
6175
    else:
6176
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
6177

    
6178
    env.update(_BuildInstanceHookEnvByObject(self, instance))
6179

    
6180
    return env
6181

    
6182
  def BuildHooksNodes(self):
6183
    """Build hooks nodes.
6184

6185
    """
6186
    instance = self._migrater.instance
6187
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
6188
    return (nl, nl + [instance.primary_node])
6189

    
6190

    
6191
class LUInstanceMigrate(LogicalUnit):
6192
  """Migrate an instance.
6193

6194
  This is migration without shutting down, compared to the failover,
6195
  which is done with shutdown.
6196

6197
  """
6198
  HPATH = "instance-migrate"
6199
  HTYPE = constants.HTYPE_INSTANCE
6200
  REQ_BGL = False
6201

    
6202
  def ExpandNames(self):
6203
    self._ExpandAndLockInstance()
6204

    
6205
    if self.op.target_node is not None:
6206
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
6207

    
6208
    self.needed_locks[locking.LEVEL_NODE] = []
6209
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6210

    
6211
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
6212
                                       cleanup=self.op.cleanup,
6213
                                       failover=False,
6214
                                       fallback=self.op.allow_failover)
6215
    self.tasklets = [self._migrater]
6216

    
6217
  def DeclareLocks(self, level):
6218
    if level == locking.LEVEL_NODE:
6219
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
6220
      if instance.disk_template in constants.DTS_EXT_MIRROR:
6221
        if self.op.target_node is None:
6222
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6223
        else:
6224
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
6225
                                                   self.op.target_node]
6226
        del self.recalculate_locks[locking.LEVEL_NODE]
6227
      else:
6228
        self._LockInstancesNodes()
6229

    
6230
  def BuildHooksEnv(self):
6231
    """Build hooks env.
6232

6233
    This runs on master, primary and secondary nodes of the instance.
6234

6235
    """
6236
    instance = self._migrater.instance
6237
    source_node = instance.primary_node
6238
    target_node = self.op.target_node
6239
    env = _BuildInstanceHookEnvByObject(self, instance)
6240
    env.update({
6241
      "MIGRATE_LIVE": self._migrater.live,
6242
      "MIGRATE_CLEANUP": self.op.cleanup,
6243
      "OLD_PRIMARY": source_node,
6244
      "NEW_PRIMARY": target_node,
6245
      })
6246

    
6247
    if instance.disk_template in constants.DTS_INT_MIRROR:
6248
      env["OLD_SECONDARY"] = target_node
6249
      env["NEW_SECONDARY"] = source_node
6250
    else:
6251
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None
6252

    
6253
    return env
6254

    
6255
  def BuildHooksNodes(self):
6256
    """Build hooks nodes.
6257

6258
    """
6259
    instance = self._migrater.instance
6260
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
6261
    return (nl, nl + [instance.primary_node])
6262

    
6263

    
6264
class LUInstanceMove(LogicalUnit):
6265
  """Move an instance by data-copying.
6266

6267
  """
6268
  HPATH = "instance-move"
6269
  HTYPE = constants.HTYPE_INSTANCE
6270
  REQ_BGL = False
6271

    
6272
  def ExpandNames(self):
6273
    self._ExpandAndLockInstance()
6274
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
6275
    self.op.target_node = target_node
6276
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
6277
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6278

    
6279
  def DeclareLocks(self, level):
6280
    if level == locking.LEVEL_NODE:
6281
      self._LockInstancesNodes(primary_only=True)
6282

    
6283
  def BuildHooksEnv(self):
6284
    """Build hooks env.
6285

6286
    This runs on master, primary and secondary nodes of the instance.
6287

6288
    """
6289
    env = {
6290
      "TARGET_NODE": self.op.target_node,
6291
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
6292
      }
6293
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6294
    return env
6295

    
6296
  def BuildHooksNodes(self):
6297
    """Build hooks nodes.
6298

6299
    """
6300
    nl = [
6301
      self.cfg.GetMasterNode(),
6302
      self.instance.primary_node,
6303
      self.op.target_node,
6304
      ]
6305
    return (nl, nl)
6306

    
6307
  def CheckPrereq(self):
6308
    """Check prerequisites.
6309

6310
    This checks that the instance is in the cluster.
6311

6312
    """
6313
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6314
    assert self.instance is not None, \
6315
      "Cannot retrieve locked instance %s" % self.op.instance_name
6316

    
6317
    node = self.cfg.GetNodeInfo(self.op.target_node)
6318
    assert node is not None, \
6319
      "Cannot retrieve locked node %s" % self.op.target_node
6320

    
6321
    self.target_node = target_node = node.name
6322

    
6323
    if target_node == instance.primary_node:
6324
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
6325
                                 (instance.name, target_node),
6326
                                 errors.ECODE_STATE)
6327

    
6328
    bep = self.cfg.GetClusterInfo().FillBE(instance)
6329

    
6330
    for idx, dsk in enumerate(instance.disks):
6331
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
6332
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
6333
                                   " cannot copy" % idx, errors.ECODE_STATE)
6334

    
6335
    _CheckNodeOnline(self, target_node)
6336
    _CheckNodeNotDrained(self, target_node)
6337
    _CheckNodeVmCapable(self, target_node)
6338

    
6339
    if instance.admin_up:
6340
      # check memory requirements on the secondary node
6341
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
6342
                           instance.name, bep[constants.BE_MEMORY],
6343
                           instance.hypervisor)
6344
    else:
6345
      self.LogInfo("Not checking memory on the secondary node as"
6346
                   " instance will not be started")
6347

    
6348
    # check bridge existence
6349
    _CheckInstanceBridgesExist(self, instance, node=target_node)
6350

    
6351
  def Exec(self, feedback_fn):
6352
    """Move an instance.
6353

6354
    The move is done by shutting it down on its present node, copying
6355
    the data over (slow) and starting it on the new node.
6356

6357
    """
6358
    instance = self.instance
6359

    
6360
    source_node = instance.primary_node
6361
    target_node = self.target_node
6362

    
6363
    self.LogInfo("Shutting down instance %s on source node %s",
6364
                 instance.name, source_node)
6365

    
6366
    result = self.rpc.call_instance_shutdown(source_node, instance,
6367
                                             self.op.shutdown_timeout)
6368
    msg = result.fail_msg
6369
    if msg:
6370
      if self.op.ignore_consistency:
6371
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
6372
                             " Proceeding anyway. Please make sure node"
6373
                             " %s is down. Error details: %s",
6374
                             instance.name, source_node, source_node, msg)
6375
      else:
6376
        raise errors.OpExecError("Could not shutdown instance %s on"
6377
                                 " node %s: %s" %
6378
                                 (instance.name, source_node, msg))
6379

    
6380
    # create the target disks
6381
    try:
6382
      _CreateDisks(self, instance, target_node=target_node)
6383
    except errors.OpExecError:
6384
      self.LogWarning("Device creation failed, reverting...")
6385
      try:
6386
        _RemoveDisks(self, instance, target_node=target_node)
6387
      finally:
6388
        self.cfg.ReleaseDRBDMinors(instance.name)
6389
        raise
6390

    
6391
    cluster_name = self.cfg.GetClusterInfo().cluster_name
6392

    
6393
    errs = []
6394
    # activate, get path, copy the data over
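    # (each disk is first assembled on the target node, then its contents
    # are copied over from the source device in a single sequential pass)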
6395
    for idx, disk in enumerate(instance.disks):
6396
      self.LogInfo("Copying data for disk %d", idx)
6397
      result = self.rpc.call_blockdev_assemble(target_node, disk,
6398
                                               instance.name, True, idx)
6399
      if result.fail_msg:
6400
        self.LogWarning("Can't assemble newly created disk %d: %s",
6401
                        idx, result.fail_msg)
6402
        errs.append(result.fail_msg)
6403
        break
6404
      dev_path = result.payload
6405
      result = self.rpc.call_blockdev_export(source_node, disk,
6406
                                             target_node, dev_path,
6407
                                             cluster_name)
6408
      if result.fail_msg:
6409
        self.LogWarning("Can't copy data over for disk %d: %s",
6410
                        idx, result.fail_msg)
6411
        errs.append(result.fail_msg)
6412
        break
6413

    
6414
    if errs:
6415
      self.LogWarning("Some disks failed to copy, aborting")
6416
      try:
6417
        _RemoveDisks(self, instance, target_node=target_node)
6418
      finally:
6419
        self.cfg.ReleaseDRBDMinors(instance.name)
6420
        raise errors.OpExecError("Errors during disk copy: %s" %
6421
                                 (",".join(errs),))
6422

    
6423
    instance.primary_node = target_node
6424
    self.cfg.Update(instance, feedback_fn)
6425

    
6426
    self.LogInfo("Removing the disks on the original node")
6427
    _RemoveDisks(self, instance, target_node=source_node)
6428

    
6429
    # Only start the instance if it's marked as up
6430
    if instance.admin_up:
6431
      self.LogInfo("Starting instance %s on node %s",
6432
                   instance.name, target_node)
6433

    
6434
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
6435
                                           ignore_secondaries=True)
6436
      if not disks_ok:
6437
        _ShutdownInstanceDisks(self, instance)
6438
        raise errors.OpExecError("Can't activate the instance's disks")
6439

    
6440
      result = self.rpc.call_instance_start(target_node, instance, None, None)
6441
      msg = result.fail_msg
6442
      if msg:
6443
        _ShutdownInstanceDisks(self, instance)
6444
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
6445
                                 (instance.name, target_node, msg))
6446

    
6447

    
6448
class LUNodeMigrate(LogicalUnit):
6449
  """Migrate all instances from a node.
6450

6451
  """
6452
  HPATH = "node-migrate"
6453
  HTYPE = constants.HTYPE_NODE
6454
  REQ_BGL = False
6455

    
6456
  def CheckArguments(self):
6457
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
6458

    
6459
  def ExpandNames(self):
6460
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6461

    
6462
    self.needed_locks = {}
6463

    
6464
    # Create tasklets for migrating instances for all instances on this node
6465
    names = []
6466
    tasklets = []
6467

    
6468
    self.lock_all_nodes = False
6469

    
6470
    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
6471
      logging.debug("Migrating instance %s", inst.name)
6472
      names.append(inst.name)
6473

    
6474
      tasklets.append(TLMigrateInstance(self, inst.name, cleanup=False))
6475

    
6476
      if inst.disk_template in constants.DTS_EXT_MIRROR:
6477
        # We need to lock all nodes, as the iallocator will choose the
6478
        # destination nodes afterwards
6479
        self.lock_all_nodes = True
6480

    
6481
    self.tasklets = tasklets
6482

    
6483
    # Declare node locks
6484
    if self.lock_all_nodes:
6485
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6486
    else:
6487
      self.needed_locks[locking.LEVEL_NODE] = [self.op.node_name]
6488
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6489

    
6490
    # Declare instance locks
6491
    self.needed_locks[locking.LEVEL_INSTANCE] = names
6492

    
6493
  def DeclareLocks(self, level):
6494
    if level == locking.LEVEL_NODE and not self.lock_all_nodes:
6495
      self._LockInstancesNodes()
6496

    
6497
  def BuildHooksEnv(self):
6498
    """Build hooks env.
6499

6500
    This runs on the master, the primary and all the secondaries.
6501

6502
    """
6503
    return {
6504
      "NODE_NAME": self.op.node_name,
6505
      }
6506

    
6507
  def BuildHooksNodes(self):
6508
    """Build hooks nodes.
6509

6510
    """
6511
    nl = [self.cfg.GetMasterNode()]
6512
    return (nl, nl)
6513

    
6514

    
6515
class TLMigrateInstance(Tasklet):
6516
  """Tasklet class for instance migration.
6517

6518
  @type live: boolean
6519
  @ivar live: whether the migration will be done live or non-live;
6520
      this variable is initialized only after CheckPrereq has run
6521
  @type cleanup: boolean
6522
  @ivar cleanup: Whether we are cleaning up from a failed migration
6523
  @type iallocator: string
6524
  @ivar iallocator: The iallocator used to determine target_node
6525
  @type target_node: string
6526
  @ivar target_node: If given, the target_node to reallocate the instance to
6527
  @type failover: boolean
6528
  @ivar failover: Whether operation results in failover or migration
6529
  @type fallback: boolean
6530
  @ivar fallback: Whether fallback to failover is allowed if migration not
6531
                  possible
6532
  @type ignore_consistency: boolean
6533
  @ivar ignore_consistency: Whether we should ignore consistency between source
6534
                            and target node
6535
  @type shutdown_timeout: int
6536
  @ivar shutdown_timeout: In case of failover timeout of the shutdown
6537

6538
  """
6539
  def __init__(self, lu, instance_name, cleanup=False,
6540
               failover=False, fallback=False,
6541
               ignore_consistency=False,
6542
               shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
6543
    """Initializes this class.
6544

6545
    """
6546
    Tasklet.__init__(self, lu)
6547

    
6548
    # Parameters
6549
    self.instance_name = instance_name
6550
    self.cleanup = cleanup
6551
    self.live = False # will be overridden later
6552
    self.failover = failover
6553
    self.fallback = fallback
6554
    self.ignore_consistency = ignore_consistency
6555
    self.shutdown_timeout = shutdown_timeout
6556

    
6557
  def CheckPrereq(self):
6558
    """Check prerequisites.
6559

6560
    This checks that the instance is in the cluster.
6561

6562
    """
6563
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
6564
    instance = self.cfg.GetInstanceInfo(instance_name)
6565
    assert instance is not None
6566
    self.instance = instance
6567

    
6568
    if (not self.cleanup and not instance.admin_up and not self.failover and
6569
        self.fallback):
6570
      self.lu.LogInfo("Instance is marked down, fallback allowed, switching"
6571
                      " to failover")
6572
      self.failover = True
6573

    
6574
    if instance.disk_template not in constants.DTS_MIRRORED:
6575
      if self.failover:
6576
        text = "failovers"
6577
      else:
6578
        text = "migrations"
6579
      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
6580
                                 " %s" % (instance.disk_template, text),
6581
                                 errors.ECODE_STATE)
6582

    
6583
    if instance.disk_template in constants.DTS_EXT_MIRROR:
6584
      _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
6585

    
6586
      if self.lu.op.iallocator:
6587
        self._RunAllocator()
6588
      else:
6589
        # We set self.target_node as it is required by
6590
        # BuildHooksEnv
6591
        self.target_node = self.lu.op.target_node
6592

    
6593
      # self.target_node is already populated, either directly or by the
6594
      # iallocator run
6595
      target_node = self.target_node
6596
      if self.target_node == instance.primary_node:
6597
        raise errors.OpPrereqError("Cannot migrate instance %s"
6598
                                   " to its primary (%s)" %
6599
                                   (instance.name, instance.primary_node))
6600

    
6601
      if len(self.lu.tasklets) == 1:
6602
        # It is safe to release locks only when we're the only tasklet
6603
        # in the LU
6604
        _ReleaseLocks(self.lu, locking.LEVEL_NODE,
6605
                      keep=[instance.primary_node, self.target_node])
6606

    
6607
    else:
6608
      secondary_nodes = instance.secondary_nodes
6609
      if not secondary_nodes:
6610
        raise errors.ConfigurationError("No secondary node but using"
6611
                                        " %s disk template" %
6612
                                        instance.disk_template)
6613
      target_node = secondary_nodes[0]
6614
      if self.lu.op.iallocator or (self.lu.op.target_node and
6615
                                   self.lu.op.target_node != target_node):
6616
        if self.failover:
6617
          text = "failed over"
6618
        else:
6619
          text = "migrated"
6620
        raise errors.OpPrereqError("Instances with disk template %s cannot"
6621
                                   " be %s to arbitrary nodes"
6622
                                   " (neither an iallocator nor a target"
6623
                                   " node can be passed)" %
6624
                                   (instance.disk_template, text),
6625
                                   errors.ECODE_INVAL)
6626

    
6627
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
6628

    
6629
    # check memory requirements on the secondary node
6630
    if not self.failover or instance.admin_up:
6631
      _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
6632
                           instance.name, i_be[constants.BE_MEMORY],
6633
                           instance.hypervisor)
6634
    else:
6635
      self.lu.LogInfo("Not checking memory on the secondary node as"
6636
                      " instance will not be started")
6637

    
6638
    # check bridge existence
6639
    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
6640

    
6641
    if not self.cleanup:
6642
      _CheckNodeNotDrained(self.lu, target_node)
6643
      if not self.failover:
6644
        result = self.rpc.call_instance_migratable(instance.primary_node,
6645
                                                   instance)
6646
        if result.fail_msg and self.fallback:
6647
          self.lu.LogInfo("Can't migrate, instance offline, fallback to"
6648
                          " failover")
6649
          self.failover = True
6650
        else:
6651
          result.Raise("Can't migrate, please use failover",
6652
                       prereq=True, ecode=errors.ECODE_STATE)
6653

    
6654
    assert not (self.failover and self.cleanup)
6655

    
6656
    if not self.failover:
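      # "live" is a legacy boolean parameter and "mode" its replacement;
      # only one of them may be given, and "live" is translated into the
      # corresponding "mode" value below.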
6657
      if self.lu.op.live is not None and self.lu.op.mode is not None:
6658
        raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
6659
                                   " parameters are accepted",
6660
                                   errors.ECODE_INVAL)
6661
      if self.lu.op.live is not None:
6662
        if self.lu.op.live:
6663
          self.lu.op.mode = constants.HT_MIGRATION_LIVE
6664
        else:
6665
          self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
6666
        # reset the 'live' parameter to None so that repeated
6667
        # invocations of CheckPrereq do not raise an exception
6668
        self.lu.op.live = None
6669
      elif self.lu.op.mode is None:
6670
        # read the default value from the hypervisor
6671
        i_hv = self.cfg.GetClusterInfo().FillHV(self.instance,
6672
                                                skip_globals=False)
6673
        self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
6674

    
6675
      self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
6676
    else:
6677
      # Failover is never live
6678
      self.live = False
6679

    
6680
  def _RunAllocator(self):
6681
    """Run the allocator based on input opcode.
6682

6683
    """
6684
    ial = IAllocator(self.cfg, self.rpc,
6685
                     mode=constants.IALLOCATOR_MODE_RELOC,
6686
                     name=self.instance_name,
6687
                     # TODO See why hail breaks with a single node below
6688
                     relocate_from=[self.instance.primary_node,
6689
                                    self.instance.primary_node],
6690
                     )
6691

    
6692
    ial.Run(self.lu.op.iallocator)
6693

    
6694
    if not ial.success:
6695
      raise errors.OpPrereqError("Can't compute nodes using"
6696
                                 " iallocator '%s': %s" %
6697
                                 (self.lu.op.iallocator, ial.info),
6698
                                 errors.ECODE_NORES)
6699
    if len(ial.result) != ial.required_nodes:
6700
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6701
                                 " of nodes (%s), required %s" %
6702
                                 (self.lu.op.iallocator, len(ial.result),
6703
                                  ial.required_nodes), errors.ECODE_FAULT)
6704
    self.target_node = ial.result[0]
6705
    self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
6706
                 self.instance_name, self.lu.op.iallocator,
6707
                 utils.CommaJoin(ial.result))
6708

    
6709
  def _WaitUntilSync(self):
6710
    """Poll with custom rpc for disk sync.
6711

6712
    This uses our own step-based rpc call.
6713

6714
    """
6715
    self.feedback_fn("* wait until resync is done")
6716
    all_done = False
6717
    while not all_done:
6718
      all_done = True
6719
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
6720
                                            self.nodes_ip,
6721
                                            self.instance.disks)
6722
      min_percent = 100
6723
      for node, nres in result.items():
6724
        nres.Raise("Cannot resync disks on node %s" % node)
6725
        node_done, node_percent = nres.payload
6726
        all_done = all_done and node_done
6727
        if node_percent is not None:
6728
          min_percent = min(min_percent, node_percent)
6729
      if not all_done:
6730
        if min_percent < 100:
6731
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
6732
        time.sleep(2)
6733

    
6734
  def _EnsureSecondary(self, node):
6735
    """Demote a node to secondary.
6736

6737
    """
6738
    self.feedback_fn("* switching node %s to secondary mode" % node)
6739

    
6740
    for dev in self.instance.disks:
6741
      self.cfg.SetDiskID(dev, node)
6742

    
6743
    result = self.rpc.call_blockdev_close(node, self.instance.name,
6744
                                          self.instance.disks)
6745
    result.Raise("Cannot change disk to secondary on node %s" % node)
6746

    
6747
  def _GoStandalone(self):
6748
    """Disconnect from the network.
6749

6750
    """
6751
    self.feedback_fn("* changing into standalone mode")
6752
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
6753
                                               self.instance.disks)
6754
    for node, nres in result.items():
6755
      nres.Raise("Cannot disconnect disks node %s" % node)
6756

    
6757
  def _GoReconnect(self, multimaster):
6758
    """Reconnect to the network.
6759

6760
    """
6761
    if multimaster:
6762
      msg = "dual-master"
6763
    else:
6764
      msg = "single-master"
6765
    self.feedback_fn("* changing disks into %s mode" % msg)
6766
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
6767
                                           self.instance.disks,
6768
                                           self.instance.name, multimaster)
6769
    for node, nres in result.items():
6770
      nres.Raise("Cannot change disks config on node %s" % node)
6771

    
6772
  def _ExecCleanup(self):
6773
    """Try to cleanup after a failed migration.
6774

6775
    The cleanup is done by:
6776
      - check that the instance is running only on one node
6777
        (and update the config if needed)
6778
      - change disks on its secondary node to secondary
6779
      - wait until disks are fully synchronized
6780
      - disconnect from the network
6781
      - change disks into single-master mode
6782
      - wait again until disks are fully synchronized
6783

6784
    """
6785
    instance = self.instance
6786
    target_node = self.target_node
6787
    source_node = self.source_node
6788

    
6789
    # check running on only one node
6790
    self.feedback_fn("* checking where the instance actually runs"
6791
                     " (if this hangs, the hypervisor might be in"
6792
                     " a bad state)")
6793
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
6794
    for node, result in ins_l.items():
6795
      result.Raise("Can't contact node %s" % node)
6796

    
6797
    runningon_source = instance.name in ins_l[source_node].payload
6798
    runningon_target = instance.name in ins_l[target_node].payload
6799

    
6800
    if runningon_source and runningon_target:
6801
      raise errors.OpExecError("Instance seems to be running on two nodes,"
6802
                               " or the hypervisor is confused; you will have"
6803
                               " to ensure manually that it runs only on one"
6804
                               " and restart this operation")
6805

    
6806
    if not (runningon_source or runningon_target):
6807
      raise errors.OpExecError("Instance does not seem to be running at all;"
6808
                               " in this case it's safer to repair by"
6809
                               " running 'gnt-instance stop' to ensure disk"
6810
                               " shutdown, and then restarting it")
6811

    
6812
    if runningon_target:
6813
      # the migration has actually succeeded, we need to update the config
6814
      self.feedback_fn("* instance running on secondary node (%s),"
6815
                       " updating config" % target_node)
6816
      instance.primary_node = target_node
6817
      self.cfg.Update(instance, self.feedback_fn)
6818
      demoted_node = source_node
6819
    else:
6820
      self.feedback_fn("* instance confirmed to be running on its"
6821
                       " primary node (%s)" % source_node)
6822
      demoted_node = target_node
6823

    
6824
    if instance.disk_template in constants.DTS_INT_MIRROR:
6825
      self._EnsureSecondary(demoted_node)
6826
      try:
6827
        self._WaitUntilSync()
6828
      except errors.OpExecError:
6829
        # we ignore here errors, since if the device is standalone, it
6830
        # won't be able to sync
6831
        pass
6832
      self._GoStandalone()
6833
      self._GoReconnect(False)
6834
      self._WaitUntilSync()
6835

    
6836
    self.feedback_fn("* done")
6837

    
6838
  def _RevertDiskStatus(self):
6839
    """Try to revert the disk status after a failed migration.
6840

6841
    """
6842
    target_node = self.target_node
6843
    if self.instance.disk_template in constants.DTS_EXT_MIRROR:
6844
      return
6845

    
6846
    try:
6847
      self._EnsureSecondary(target_node)
6848
      self._GoStandalone()
6849
      self._GoReconnect(False)
6850
      self._WaitUntilSync()
6851
    except errors.OpExecError, err:
6852
      self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
6853
                         " please try to recover the instance manually;"
6854
                         " error '%s'" % str(err))
6855

    
6856
  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info
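
    # Finalizing the migration on the target node with success=False tells
    # the hypervisor there to abort the incoming migration and clean up.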
    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
6876
    """Migrate an instance.
6877

6878
    The migrate is done by:
6879
      - change the disks into dual-master mode
6880
      - wait until disks are fully synchronized again
6881
      - migrate the instance
6882
      - change disks on the new secondary node (the old primary) to secondary
6883
      - wait until disks are fully synchronized
6884
      - change disks into single-master mode
6885

6886
    """
6887
    instance = self.instance
6888
    target_node = self.target_node
6889
    source_node = self.source_node
6890

    
6891
    self.feedback_fn("* checking disk consistency between source and target")
6892
    for dev in instance.disks:
6893
      if not _CheckDiskConsistency(self.lu, dev, target_node, False):
6894
        raise errors.OpExecError("Disk %s is degraded or not fully"
6895
                                 " synchronized on target node,"
6896
                                 " aborting migration" % dev.iv_name)
6897

    
6898
    # First get the migration information from the remote node
6899
    result = self.rpc.call_migration_info(source_node, instance)
6900
    msg = result.fail_msg
6901
    if msg:
6902
      log_err = ("Failed fetching source migration information from %s: %s" %
6903
                 (source_node, msg))
6904
      logging.error(log_err)
6905
      raise errors.OpExecError(log_err)
6906

    
6907
    self.migration_info = migration_info = result.payload
6908

    
6909
    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
6910
      # Then switch the disks to master/master mode
6911
      self._EnsureSecondary(target_node)
6912
      self._GoStandalone()
6913
      self._GoReconnect(True)
6914
      self._WaitUntilSync()
6915

    
6916
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
6917
    result = self.rpc.call_accept_instance(target_node,
6918
                                           instance,
6919
                                           migration_info,
6920
                                           self.nodes_ip[target_node])
6921

    
6922
    msg = result.fail_msg
6923
    if msg:
6924
      logging.error("Instance pre-migration failed, trying to revert"
6925
                    " disk status: %s", msg)
6926
      self.feedback_fn("Pre-migration failed, aborting")
6927
      self._AbortMigration()
6928
      self._RevertDiskStatus()
6929
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
6930
                               (instance.name, msg))
6931

    
6932
    self.feedback_fn("* migrating instance to %s" % target_node)
6933
    result = self.rpc.call_instance_migrate(source_node, instance,
6934
                                            self.nodes_ip[target_node],
6935
                                            self.live)
6936
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s", msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
      self._EnsureSecondary(source_node)
      self._WaitUntilSync()
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()

    self.feedback_fn("* done")

  def _ExecFailover(self):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance
    primary_node = self.cfg.GetNodeInfo(instance.primary_node)

    source_node = instance.primary_node
    target_node = self.target_node

    if instance.admin_up:
      self.feedback_fn("* checking disk consistency between source and target")
      for dev in instance.disks:
        # for drbd, these are drbd over lvm
        if not _CheckDiskConsistency(self, dev, target_node, False):
          if not self.ignore_consistency:
            raise errors.OpExecError("Disk %s is degraded on target node,"
                                     " aborting failover" % dev.iv_name)
    else:
      self.feedback_fn("* not checking disk consistency as instance is not"
                       " running")

    self.feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.ignore_consistency or primary_node.offline:
        self.lu.LogWarning("Could not shutdown instance %s on node %s,"
                           " proceeding anyway; please make sure node"
                           " %s is down; error details: %s",
                           instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    self.feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      self.feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    self.feedback_fn = feedback_fn
    self.source_node = self.instance.primary_node

    # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
    if self.instance.disk_template in constants.DTS_INT_MIRROR:
      self.target_node = self.instance.secondary_nodes[0]
      # Otherwise self.target_node has been populated either
      # directly, or through an iallocator.

    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }

    if self.failover:
      feedback_fn("Failover instance %s" % self.instance.name)
      self._ExecFailover()
    else:
      feedback_fn("Migrating instance %s" % self.instance.name)

      if self.cleanup:
        return self._ExecCleanup()
      else:
        return self._ExecMigration()


def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      the CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  if device.children:
    for child in device.children:
      _CreateBlockDev(lu, node, instance, child, force_create,
                      info, force_open)

  if not force_create:
    return

  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)


def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  result.Raise("Can't create block device %s on"
               " node %s for instance %s" % (device, node, instance.name))
  if device.physical_id is None:
    device.physical_id = result.payload


def _GenerateUniqueNames(lu, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
    results.append("%s%s" % (new_id, val))
  return results
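  # Illustrative example:
  #   _GenerateUniqueNames(lu, [".disk0_data", ".disk0_meta"])
  # returns something like ["<uuid>.disk0_data", "<uuid>.disk0_meta"], each
  # element prefixed with its own freshly generated unique ID reserved under
  # the current execution context (ECId).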


def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
                         iv_name, p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  assert len(vgnames) == len(names) == 2
  port = lu.cfg.AllocatePort()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgnames[0], names[0]))
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgnames[1], names[1]))
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name)
  return drbd_dev
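  # Resulting device tree (sizes in MiB, values illustrative):
  #   LD_DRBD8  size=<size>  logical_id=(primary, secondary, port,
  #                                      p_minor, s_minor, shared_secret)
  #     +- LD_LV  size=<size>  logical_id=(vgnames[0], names[0])   # data
  #     +- LD_LV  size=128     logical_id=(vgnames[1], names[1])   # metadata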


def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index, feedback_fn):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      vg = disk.get(constants.IDISK_VG, vgname)
      feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
      disk_dev = objects.Disk(dev_type=constants.LD_LV,
                              size=disk[constants.IDISK_SIZE],
                              logical_id=(vg, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk[constants.IDISK_MODE])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      data_vg = disk.get(constants.IDISK_VG, vgname)
      meta_vg = disk.get(constants.IDISK_METAVG, data_vg)
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk[constants.IDISK_SIZE],
                                      [data_vg, meta_vg],
                                      names[idx * 2:idx * 2 + 2],
                                      "disk/%d" % disk_index,
                                      minors[idx * 2], minors[idx * 2 + 1])
      disk_dev.mode = disk[constants.IDISK_MODE]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    opcodes.RequireFileStorage()

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE,
                              size=disk[constants.IDISK_SIZE],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk[constants.IDISK_MODE])
      disks.append(disk_dev)
  elif template_name == constants.DT_SHARED_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    opcodes.RequireSharedFileStorage()

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE,
                              size=disk[constants.IDISK_SIZE],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk[constants.IDISK_MODE])
      disks.append(disk_dev)
  elif template_name == constants.DT_BLOCK:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV,
                              size=disk[constants.IDISK_SIZE],
                              logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
                                          disk[constants.IDISK_ADOPT]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk[constants.IDISK_MODE])
      disks.append(disk_dev)

  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks


def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def _CalcEta(time_taken, written, total_size):
  """Calculates the ETA based on size written and total size.

  @param time_taken: The time taken so far
  @param written: amount written so far
  @param total_size: The total size of data to be written
  @return: The remaining time in seconds

  """
  avg_time = time_taken / float(written)
  return (total_size - written) * avg_time
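  # Example: _CalcEta(30.0, 256, 1024) == 90.0 -- 256 units written in 30
  # seconds give an average of 0.1171875 s/unit, so the remaining 768 units
  # need roughly 90 more seconds.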


def _WipeDisks(lu, instance):
  """Wipes instance disks.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should wipe
  @return: the success of the wipe

  """
  node = instance.primary_node

  for device in instance.disks:
    lu.cfg.SetDiskID(device, node)

  logging.info("Pause sync of instance %s disks", instance.name)
  result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)

  for idx, success in enumerate(result.payload):
    if not success:
      logging.warn("pause-sync of instance %s for disks %d failed",
                   instance.name, idx)

  try:
    for idx, device in enumerate(instance.disks):
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
      # MAX_WIPE_CHUNK at max
      wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
                            constants.MIN_WIPE_CHUNK_PERCENT)
      # we _must_ make this an int, otherwise rounding errors will
      # occur
      wipe_chunk_size = int(wipe_chunk_size)

      lu.LogInfo("* Wiping disk %d", idx)
      logging.info("Wiping disk %d for instance %s, node %s using"
                   " chunk size %s", idx, instance.name, node, wipe_chunk_size)

      offset = 0
      size = device.size
      last_output = 0
      start_time = time.time()

      while offset < size:
        wipe_size = min(wipe_chunk_size, size - offset)
        logging.debug("Wiping disk %d, offset %s, chunk %s",
                      idx, offset, wipe_size)
        result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
                     (idx, offset, wipe_size))
        now = time.time()
        offset += wipe_size
        if now - last_output >= 60:
          eta = _CalcEta(now - start_time, offset, size)
          lu.LogInfo(" - done: %.1f%% ETA: %s" %
                     (offset / float(size) * 100, utils.FormatSeconds(eta)))
          last_output = now
  finally:
    logging.info("Resume sync of instance %s disks", instance.name)

    result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)

    for idx, success in enumerate(result.payload):
      if not success:
        lu.LogWarning("Resume sync of disk %d failed, please have a"
                      " look at the status and troubleshoot the issue", idx)
        logging.warn("resume-sync of instance %s for disks %d failed",
                     instance.name, idx)


def _CreateDisks(lu, instance, to_skip=None, target_node=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node: string
  @param target_node: if passed, overrides the target node for creation
  @rtype: boolean
  @return: the success of the creation

  """
  info = _GetInstanceInfoText(instance)
  if target_node is None:
    pnode = instance.primary_node
    all_nodes = instance.all_nodes
  else:
    pnode = target_node
    all_nodes = [pnode]

  if instance.disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir, pnode))

  # Note: this needs to be kept in sync with adding of disks in
  # LUInstanceSetParams
  for idx, device in enumerate(instance.disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in all_nodes:
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)


def _RemoveDisks(lu, instance, target_node=None):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node: string
  @param target_node: used to override the node on which to remove the disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  for device in instance.disks:
    if target_node:
      edata = [(target_node, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node, disk in edata:
      lu.cfg.SetDiskID(disk, node)
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", device.iv_name, node, msg)
        all_result = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if target_node:
      tgt = target_node
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, tgt, result.fail_msg)
      all_result = False

  return all_result


def _ComputeDiskSizePerVG(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  def _compute(disks, payload):
    """Universal algorithm.

    """
    vgs = {}
    for disk in disks:
      vg = disk[constants.IDISK_VG]
      vgs[vg] = vgs.get(vg, 0) + disk[constants.IDISK_SIZE] + payload

    return vgs

  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: {},
    constants.DT_PLAIN: _compute(disks, 0),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: _compute(disks, 128),
    constants.DT_FILE: {},
    constants.DT_SHARED_FILE: {},
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]
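  # Illustrative example: for two DRBD8 disks of 10240 MiB each with
  # IDISK_VG "xenvg", this returns {"xenvg": 20736}, i.e. both data LVs plus
  # 128 MiB of DRBD metadata per disk, keyed by volume group.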


def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(d[constants.IDISK_SIZE] + 128 for d in disks),
    constants.DT_FILE: None,
    constants.DT_SHARED_FILE: 0,
    constants.DT_BLOCK: 0,
  }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]
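  # Example: for two 10240 MiB disks this returns 20736 with DT_DRBD8 (data
  # plus 128 MiB of DRBD metadata per disk) and 20480 with DT_PLAIN; unlike
  # _ComputeDiskSizePerVG the result is a single total, not split per VG.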


def _FilterVmNodes(lu, nodenames):
  """Filters out non-vm_capable nodes from a list.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @rtype: list
  @return: the list of vm-capable nodes

  """
  non_vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
  return [name for name in nodenames if name not in non_vm_nodes]


def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  nodenames = _FilterVmNodes(lu, nodenames)
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                  hvname,
                                                  hvparams)
  for node in nodenames:
    info = hvinfo[node]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" % node)


def _CheckOSParams(lu, required, nodenames, osname, osparams):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  nodenames = _FilterVmNodes(lu, nodenames)
  result = lu.rpc.call_os_validate(required, nodenames, osname,
                                   [constants.OS_VALIDATE_PARAMETERS],
                                   osparams)
  for node, nres in result.items():
    # we don't check for offline cases since this should be run only
    # against the master node and/or an instance's nodes
    nres.Raise("OS Parameters validation failed on node %s" % node)
    if not nres.payload:
      lu.LogInfo("OS %s not found on node %s, validation skipped",
                 osname, node)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        self.op.file_driver not in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute",
                                 errors.ECODE_INVAL)

    ### Node/iallocator related checks
    _CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    self._cds = _GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
          netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(constants.EXPORT_DIR, src_path)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    nics = [n.ToDict() for n in self.nics]
    ial = IAllocator(self.cfg, self.rpc,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=self.disks,
                     nics=nics,
                     hypervisor=self.op.hypervisor,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)
    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.result),
                                  ial.required_nodes), errors.ECODE_FAULT)
    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))
    if ial.required_nodes == 2:
      self.op.snode = ial.result[1]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=_NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
    ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    src_node = self.op.src_node
    src_path = self.op.src_path

    if src_node is None:
      locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if src_path in exp_list[node].payload:
          found = True
          self.op.src_node = src_node = node
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
                                                       src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                    src_path, errors.ECODE_INVAL)

    _CheckNodeOnline(self, src_node)
    result = self.rpc.call_export_info(src_node, src_path)
    result.Raise("No export or invalid export found in dir %s" % src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if (int(ei_version) != constants.EXPORT_VERSION):
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      if einfo.has_option(constants.INISECT_INS, "disk_count"):
        disks = []
        # TODO: import the disk iv_name too
        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
        self.op.disks = disks
      else:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if (not self.op.nics and
        einfo.has_option(constants.INISECT_INS, "nic_count")):
      nics = []
      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
        ndict = {}
        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
          ndict[name] = v
        nics.append(ndict)
      self.op.nics = nics

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    _CheckGlobalHvParams(self.op.hvparams)

    # fill and remember the beparams dict
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
    self.be_full = cluster.SimpleFillBE(self.op.beparams)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = []
    for idx, nic in enumerate(self.op.nics):
      nic_mode_req = nic.get(constants.INIC_MODE, None)
      nic_mode = nic_mode_req
      if nic_mode is None:
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

      # in routed mode, for the first nic, the default ip is 'auto'
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
        default_ip_mode = constants.VALUE_AUTO
      else:
        default_ip_mode = constants.VALUE_NONE

      # ip validity checks
      ip = nic.get(constants.INIC_IP, default_ip_mode)
      if ip is None or ip.lower() == constants.VALUE_NONE:
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        if not self.op.name_check:
          raise errors.OpPrereqError("IP address set to auto but name checks"
                                     " have been skipped",
                                     errors.ECODE_INVAL)
        nic_ip = self.hostname1.ip
      else:
        if not netutils.IPAddress.IsValid(ip):
          raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                     errors.ECODE_INVAL)
        nic_ip = ip

      # TODO: check the ip address for uniqueness
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                   errors.ECODE_INVAL)

      # MAC address verification
      mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        mac = utils.NormalizeAndValidateMac(mac)

        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address %s already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)

      #  Build nic parameters
      link = nic.get(constants.INIC_LINK, None)
      nicparams = {}
      if nic_mode_req:
        nicparams[constants.NIC_MODE] = nic_mode_req
      if link:
        nicparams[constants.NIC_LINK] = link

      check_params = cluster.SimpleFillNIC(nicparams)
      objects.NIC.CheckParameterSyntax(check_params)
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode, errors.ECODE_INVAL)
      size = disk.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
      try:
        size = int(size)
      except (TypeError, ValueError):
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                   errors.ECODE_INVAL)

      data_vg = disk.get(constants.IDISK_VG, default_vg)
      new_disk = {
        constants.IDISK_SIZE: size,
        constants.IDISK_MODE: mode,
        constants.IDISK_VG: data_vg,
        constants.IDISK_METAVG: disk.get(constants.IDISK_METAVG, data_vg),
        }
      if constants.IDISK_ADOPT in disk:
        new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
      self.disks.append(new_disk)

    if self.op.mode == constants.INSTANCE_IMPORT:

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks),
                                   errors.ECODE_INVAL)

      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      try:
        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
                                   " an integer: %s" % str(err),
                                   errors.ECODE_STATE)
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
8217

    
8218
    #### allocator run
8219

    
8220
    if self.op.iallocator is not None:
8221
      self._RunAllocator()
8222

    
8223
    #### node related checks
8224

    
8225
    # check primary node
8226
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
8227
    assert self.pnode is not None, \
8228
      "Cannot retrieve locked node %s" % self.op.pnode
8229
    if pnode.offline:
8230
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
8231
                                 pnode.name, errors.ECODE_STATE)
8232
    if pnode.drained:
8233
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
8234
                                 pnode.name, errors.ECODE_STATE)
8235
    if not pnode.vm_capable:
8236
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
8237
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
8238

    
8239
    self.secondaries = []
8240

    
8241
    # mirror node verification
8242
    if self.op.disk_template in constants.DTS_INT_MIRROR:
8243
      if self.op.snode == pnode.name:
8244
        raise errors.OpPrereqError("The secondary node cannot be the"
8245
                                   " primary node", errors.ECODE_INVAL)
8246
      _CheckNodeOnline(self, self.op.snode)
8247
      _CheckNodeNotDrained(self, self.op.snode)
8248
      _CheckNodeVmCapable(self, self.op.snode)
8249
      self.secondaries.append(self.op.snode)
8250

    
8251
    nodenames = [pnode.name] + self.secondaries
8252

    
8253
    if not self.adopt_disks:
8254
      # Check lv size requirements, if not adopting
8255
      req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
8256
      _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
8257

    
8258
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
8259
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
8260
                                disk[constants.IDISK_ADOPT])
8261
                     for disk in self.disks])
8262
      if len(all_lvs) != len(self.disks):
8263
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
8264
                                   errors.ECODE_INVAL)
8265
      for lv_name in all_lvs:
8266
        try:
8267
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
8268
          # to ReserveLV uses the same syntax
8269
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
8270
        except errors.ReservationError:
8271
          raise errors.OpPrereqError("LV named %s used by another instance" %
8272
                                     lv_name, errors.ECODE_NOTUNIQUE)
8273

    
8274
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
8275
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
8276

    
8277
      node_lvs = self.rpc.call_lv_list([pnode.name],
8278
                                       vg_names.payload.keys())[pnode.name]
8279
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
8280
      node_lvs = node_lvs.payload
8281

    
8282
      delta = all_lvs.difference(node_lvs.keys())
8283
      if delta:
8284
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
8285
                                   utils.CommaJoin(delta),
8286
                                   errors.ECODE_INVAL)
8287
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
8288
      if online_lvs:
8289
        raise errors.OpPrereqError("Online logical volumes found, cannot"
8290
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
8291
                                   errors.ECODE_STATE)
8292
      # update the size of disk based on what is found
8293
      for dsk in self.disks:
8294
        dsk[constants.IDISK_SIZE] = \
8295
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
8296
                                        dsk[constants.IDISK_ADOPT])][0]))
8297

    
8298
    elif self.op.disk_template == constants.DT_BLOCK:
8299
      # Normalize and de-duplicate device paths
8300
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
8301
                       for disk in self.disks])
8302
      if len(all_disks) != len(self.disks):
8303
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
8304
                                   errors.ECODE_INVAL)
8305
      baddisks = [d for d in all_disks
8306
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
8307
      if baddisks:
8308
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
8309
                                   " cannot be adopted" %
8310
                                   (", ".join(baddisks),
8311
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
8312
                                   errors.ECODE_INVAL)
8313

    
8314
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
8315
                                            list(all_disks))[pnode.name]
8316
      node_disks.Raise("Cannot get block device information from node %s" %
8317
                       pnode.name)
8318
      node_disks = node_disks.payload
8319
      delta = all_disks.difference(node_disks.keys())
8320
      if delta:
8321
        raise errors.OpPrereqError("Missing block device(s): %s" %
8322
                                   utils.CommaJoin(delta),
8323
                                   errors.ECODE_INVAL)
8324
      for dsk in self.disks:
8325
        dsk[constants.IDISK_SIZE] = \
8326
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
8327

    
8328
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
8329

    
8330
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
8331
    # check OS parameters (remotely)
8332
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
8333

    
8334
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
8335

    
8336
    # memory check on primary node
8337
    if self.op.start:
8338
      _CheckNodeFreeMemory(self, self.pnode.name,
8339
                           "creating instance %s" % self.op.instance_name,
8340
                           self.be_full[constants.BE_MEMORY],
8341
                           self.op.hypervisor)
8342

    
8343
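    # The dry-run result for this opcode is the list of nodes the instance
    # would be placed on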
    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

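    # Hypervisors in HTS_REQ_PORT need a TCP port reserved at the cluster
    # level (typically for their graphical console); for all others no
    # network port is allocated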
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
      # this is needed because os.path.join does not accept None arguments
      if self.op.file_storage_dir is None:
        string_file_storage_dir = ""
      else:
        string_file_storage_dir = self.op.file_storage_dir

      # build the full file storage dir path
      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      file_storage_dir = utils.PathJoin(get_fsd_fn(),
                                        string_file_storage_dir, instance)
    else:
      file_storage_dir = ""

    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0,
                                  feedback_fn)

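    # Build the instance object; it is created with admin_up=False and only
    # marked up (and started) at the end of Exec if requested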
    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        _CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed, reverting...")
        try:
          _RemoveDisks(self, iobj)
        finally:
          self.cfg.ReleaseDRBDMinors(instance)
          raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      _ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
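    # Adopted disks keep their existing data, so only freshly created disks
    # are wiped, and only if the cluster asks for pre-allocation wiping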
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        _WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

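    # Decide how to wait for the initial disk sync: fully if wait_for_sync
    # was requested, otherwise only a quick degradation check for internally
    # mirrored (DRBD) disk templates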
    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
                                                 self.op.debug_level)
          result.Raise("Could not add os for instance %s"
                       " on node %s" % (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")

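        # Build one DiskTransfer per source image; disks for which the
        # export provides no image are skipped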
        transfers = []

        for idx, image in enumerate(self.src_images):
          if not image:
            continue

          # FIXME: pass debug option from opcode to backend
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                             constants.IEIO_FILE, (image, ),
                                             constants.IEIO_SCRIPT,
                                             (iobj.disks[idx], idx),
                                             None)
          transfers.append(dt)

        import_result = \
          masterd.instance.TransferInstanceData(self, feedback_fn,
                                                self.op.src_node, pnode_name,
                                                self.pnode.secondary_ip,
                                                iobj, transfers)
        if not compat.all(import_result):
          self.LogWarning("Some disks for instance %s on node %s were not"
                          " imported successfully" % (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
        feedback_fn("* preparing remote import...")
        # The source cluster will stop the instance before attempting to make a
        # connection. In some cases stopping an instance can take a long time,
        # hence the shutdown timeout is added to the connection timeout.
        connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                           self.op.source_shutdown_timeout)
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

        assert iobj.primary_node == self.pnode.name
        disk_results = \
          masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                        self.source_x509_ca,
                                        self._cds, timeouts)
        if not compat.all(disk_results):
          # TODO: Should the instance still be started, even if some disks
          # failed to import (valid for local imports, too)?
          self.LogWarning("Some disks for instance %s on node %s were not"
                          " imported successfully" % (instance, pnode_name))

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   self.source_instance_name,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      iobj.admin_up = True
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

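    # Ask the primary node which instances it is currently running, so we
    # can verify the instance is actually up before returning console
    # information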
    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    node_insts.Raise("Can't get node information from %s" % node)

    if instance.name not in node_insts.payload:
      if instance.admin_up:
        state = constants.INSTST_ERRORDOWN
      else:
        state = constants.INSTST_ADMINDOWN
      raise errors.OpExecError("Instance %s is not running (state %s)" %
                               (instance.name, state))

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)


def _GetInstanceConsole(cluster, instance):
  """Returns console information for an instance.

  @type cluster: L{objects.Cluster}
  @type instance: L{objects.Instance}
  @rtype: dict

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  # beparams and hvparams are passed separately, to avoid editing the
  # instance and then saving the defaults in the instance itself.
  hvparams = cluster.FillHV(instance)
  beparams = cluster.FillBE(instance)
  console = hyper.GetInstanceConsole(instance, hvparams, beparams)

  assert console.instance == instance.name
  assert console.Validate()

  return console.ToDict()


class LUInstanceReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
                                  self.op.iallocator)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    assert locking.LEVEL_NODE not in self.needed_locks
    assert locking.LEVEL_NODEGROUP not in self.needed_locks

    assert self.op.iallocator is None or self.op.remote_node is None, \
      "Conflicting options"

    if self.op.remote_node is not None:
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)

      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

      if self.op.iallocator is not None:
        # iallocator will select a new node in the same group
        self.needed_locks[locking.LEVEL_NODEGROUP] = []

    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
                                   self.op.iallocator, self.op.remote_node,
                                   self.op.disks, False, self.op.early_release)

    self.tasklets = [self.replacer]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert self.op.remote_node is None
      assert self.op.iallocator is not None
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      self.share_locks[locking.LEVEL_NODEGROUP] = 1
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)

    elif level == locking.LEVEL_NODE:
      if self.op.iallocator is not None:
        assert self.op.remote_node is None
        assert not self.needed_locks[locking.LEVEL_NODE]

        # Lock member nodes of all locked groups
        self.needed_locks[locking.LEVEL_NODE] = [node_name
          for group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
          for node_name in self.cfg.GetNodeGroup(group_uuid).members]
      else:
        self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.replacer.instance
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self.replacer.instance
    nl = [
      self.cfg.GetMasterNode(),
      instance.primary_node,
      ]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    """
    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
            self.op.iallocator is None)

    owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
    if owned_groups:
      groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
      if owned_groups != groups:
        raise errors.OpExecError("Node groups used by instance '%s' changed"
                                 " since lock was acquired, current list is %r,"
                                 " used to be '%s'" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(groups),
                                  utils.CommaJoin(owned_groups)))

    return LogicalUnit.CheckPrereq(self)


class TLReplaceDisks(Tasklet):
  """Replaces disks for an instance.

  Note: Locking is not within the scope of this class.

  """
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
               disks, delay_iallocator, early_release):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.mode = mode
    self.iallocator_name = iallocator_name
    self.remote_node = remote_node
    self.disks = disks
    self.delay_iallocator = delay_iallocator
    self.early_release = early_release

    # Runtime data
    self.instance = None
    self.new_node = None
    self.target_node = None
    self.other_node = None
    self.remote_node_info = None
    self.node_secondary_ip = None

  @staticmethod
  def CheckArguments(mode, remote_node, iallocator):
    """Helper function for users of this class.

    """
    # check for valid parameter combination
    if mode == constants.REPLACE_DISK_CHG:
      if remote_node is None and iallocator is None:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given", errors.ECODE_INVAL)

      if remote_node is not None and iallocator is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both", errors.ECODE_INVAL)

    elif remote_node is not None or iallocator is not None:
      # Not replacing the secondary
      raise errors.OpPrereqError("The iallocator and new node options can"
                                 " only be used when changing the"
                                 " secondary node", errors.ECODE_INVAL)

  @staticmethod
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
    """Compute a new secondary node using an IAllocator.

    """
    ial = IAllocator(lu.cfg, lu.rpc,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=instance_name,
                     relocate_from=relocate_from)

    ial.Run(iallocator_name)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
                                 " %s" % (iallocator_name, ial.info),
                                 errors.ECODE_NORES)

    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (iallocator_name,
                                  len(ial.result), ial.required_nodes),
                                 errors.ECODE_FAULT)

    remote_node_name = ial.result[0]

    lu.LogInfo("Selected new secondary for instance '%s': %s",
               instance_name, remote_node_name)

    return remote_node_name

  def _FindFaultyDisks(self, node_name):
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
                                    node_name, True)

  def _CheckDisksActivated(self, instance):
    """Checks if the instance disks are activated.

    @param instance: The instance to check disks
    @return: True if they are activated, False otherwise

    """
    nodes = instance.all_nodes

    for idx, dev in enumerate(instance.disks):
      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
        self.cfg.SetDiskID(dev, node)

        result = self.rpc.call_blockdev_find(node, dev)

        if result.offline:
          continue
        elif result.fail_msg or not result.payload:
          return False

    return True

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.instance_name

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances", errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes),
                                 errors.ECODE_FAULT)

    if not self.delay_iallocator:
      self._CheckPrereq2()

  def _CheckPrereq2(self):
    """Check prerequisites, second part.

    This function should always be part of CheckPrereq. It was separated and is
    now called from Exec because during node evacuation the iallocator was only
    called with an unmodified cluster model, not taking planned changes into
    account.

    """
    instance = self.instance
    secondary_node = instance.secondary_nodes[0]

    if self.iallocator_name is None:
      remote_node = self.remote_node
    else:
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
                                       instance.name, instance.secondary_nodes)

    if remote_node is None:
      self.remote_node_info = None
    else:
      assert remote_node in self.lu.glm.list_owned(locking.LEVEL_NODE), \
             "Remote node '%s' is not locked" % remote_node

      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node

    if remote_node == self.instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance", errors.ECODE_INVAL)

    if remote_node == secondary_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance",
                                 errors.ECODE_INVAL)

    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
                                    constants.REPLACE_DISK_CHG):
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
                                 errors.ECODE_INVAL)

    if self.mode == constants.REPLACE_DISK_AUTO:
      if not self._CheckDisksActivated(instance):
        raise errors.OpPrereqError("Please run activate-disks on instance %s"
                                   " first" % self.instance_name,
                                   errors.ECODE_STATE)
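      # Automatic mode: find out which side actually has the faulty disks;
      # disks faulty on both nodes at the same time cannot be repaired
      # automatically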
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
      faulty_secondary = self._FindFaultyDisks(secondary_node)

      if faulty_primary and faulty_secondary:
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
                                   " one node and can not be repaired"
                                   " automatically" % self.instance_name,
                                   errors.ECODE_STATE)

      if faulty_primary:
        self.disks = faulty_primary
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]
      elif faulty_secondary:
        self.disks = faulty_secondary
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]
      else:
        self.disks = []
        check_nodes = []

    else:
      # Non-automatic modes
      if self.mode == constants.REPLACE_DISK_PRI:
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_SEC:
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_CHG:
        self.new_node = remote_node
        self.other_node = instance.primary_node
        self.target_node = secondary_node
        check_nodes = [self.new_node, self.other_node]

        _CheckNodeNotDrained(self.lu, remote_node)
        _CheckNodeVmCapable(self.lu, remote_node)

        old_node_info = self.cfg.GetNodeInfo(secondary_node)
        assert old_node_info is not None
        if old_node_info.offline and not self.early_release:
          # doesn't make sense to delay the release
          self.early_release = True
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
                          " early-release mode", secondary_node)

      else:
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
                                     self.mode)

      # If not specified all disks should be replaced
      if not self.disks:
        self.disks = range(len(self.instance.disks))

    for node in check_nodes:
      _CheckNodeOnline(self.lu, node)

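    # Only the nodes still involved in the operation need to stay locked;
    # all other node (and node group) locks are released below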
    touched_nodes = frozenset(node_name for node_name in [self.new_node,
                                                          self.other_node,
                                                          self.target_node]
                              if node_name is not None)

    # Release unneeded node locks
    _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)

    # Release any owned node group
    if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
      _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)

    # Check whether disks are valid
    for disk_idx in self.disks:
      instance.FindDisk(disk_idx)

    # Get secondary node IP addresses
    self.node_secondary_ip = \
      dict((node_name, self.cfg.GetNodeInfo(node_name).secondary_ip)
           for node_name in touched_nodes)

  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    if self.delay_iallocator:
      self._CheckPrereq2()

    if __debug__:
      # Verify owned locks before starting operation
      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
      assert set(owned_locks) == set(self.node_secondary_ip), \
          ("Incorrect node locks, owning %s, expected %s" %
           (owned_locks, self.node_secondary_ip.keys()))

      owned_locks = self.lu.glm.list_owned(locking.LEVEL_INSTANCE)
      assert list(owned_locks) == [self.instance_name], \
          "Instance '%s' not locked" % self.instance_name

      assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
          "Should not own any node group lock at this point"

    if not self.disks:
      feedback_fn("No disks need replacement")
      return

    feedback_fn("Replacing disk(s) %s for %s" %
                (utils.CommaJoin(self.disks), self.instance.name))

    activate_disks = (not self.instance.admin_up)

    # Activate the instance disks if we're replacing them on a down instance
    if activate_disks:
      _StartInstanceDisks(self.lu, self.instance, True)

    try:
      # Should we replace the secondary node?
      if self.new_node is not None:
        fn = self._ExecDrbd8Secondary
      else:
        fn = self._ExecDrbd8DiskOnly

      result = fn(feedback_fn)
    finally:
      # Deactivate the instance disks if we're replacing them on a
      # down instance
      if activate_disks:
        _SafeShutdownInstanceDisks(self.lu, self.instance)

    if __debug__:
      # Verify owned locks
      owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
      nodes = frozenset(self.node_secondary_ip)
      assert ((self.early_release and not owned_locks) or
              (not self.early_release and not (set(owned_locks) - nodes))), \
        ("Not owning the correct locks, early_release=%s, owned=%r,"
         " nodes=%r" % (self.early_release, owned_locks, nodes))

    return result

  def _CheckVolumeGroup(self, nodes):
    self.lu.LogInfo("Checking volume groups")

    vgname = self.cfg.GetVGName()

    # Make sure volume group exists on all involved nodes
    results = self.rpc.call_vg_list(nodes)
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")

    for node in nodes:
      res = results[node]
      res.Raise("Error checking node %s" % node)
      if vgname not in res.payload:
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
                                 (vgname, node))

  def _CheckDisksExistence(self, nodes):
    # Check disk existence
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
        self.cfg.SetDiskID(dev, node)

        result = self.rpc.call_blockdev_find(node, dev)

        msg = result.fail_msg
        if msg or not result.payload:
          if not msg:
            msg = "disk not found"
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
                      (idx, node_name))

      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
                                   ldisk=ldisk):
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
                                 " replace disks for instance %s" %
                                 (node_name, self.instance.name))

  def _CreateNewStorage(self, node_name):
    iv_names = {}

    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))

      self.cfg.SetDiskID(dev, node_name)

      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
      names = _GenerateUniqueNames(self.lu, lv_names)

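      # The new data LV mirrors the size of the DRBD device, while the
      # metadata LV uses the fixed size (128) these DRBD8 meta volumes are
      # created with; both go into the same volume groups as the LVs they
      # replace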
      vg_data = dev.children[0].logical_id[0]
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
                             logical_id=(vg_data, names[0]))
      vg_meta = dev.children[1].logical_id[0]
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vg_meta, names[1]))

      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)

      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    return iv_names

  def _CheckDevices(self, node_name, iv_names):
    for name, (dev, _, _) in iv_names.iteritems():
      self.cfg.SetDiskID(dev, node_name)

      result = self.rpc.call_blockdev_find(node_name, dev)

      msg = result.fail_msg
      if msg or not result.payload:
        if not msg:
          msg = "disk not found"
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))

      if result.payload.is_degraded:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

  def _RemoveOldStorage(self, node_name, iv_names):
    for name, (_, old_lvs, _) in iv_names.iteritems():
      self.lu.LogInfo("Remove logical volumes for %s" % name)

      for lv in old_lvs:
        self.cfg.SetDiskID(lv, node_name)

        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
        if msg:
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
                             hint="remove unused LVs manually")

  def _ExecDrbd8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for DRBD 8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.other_node, self.target_node])
    self._CheckVolumeGroup([self.target_node, self.other_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.other_node,
                                self.other_node == self.instance.primary_node,
                                False)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    iv_names = self._CreateNewStorage(self.target_node)

    # Step: for each lv, detach+rename*2+attach
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)

      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
                                                     old_lvs)
      result.Raise("Can't detach drbd from local storage on node"
                   " %s for device %s" % (self.target_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)

      # Build the rename list based on what LVs exist on the node
      rename_old_to_new = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
        if not result.fail_msg and result.payload:
          # device exists
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))

      self.lu.LogInfo("Renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_old_to_new)
      result.Raise("Can't rename old LVs on node %s" % self.target_node)

      # Now we rename the new LVs to the old LVs
      self.lu.LogInfo("Renaming the new LVs on the target node")
      rename_new_to_old = [(new, old.physical_id)
                           for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_new_to_old)
      result.Raise("Can't rename new LVs on node %s" % self.target_node)

      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        self.cfg.SetDiskID(new, self.target_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        self.cfg.SetDiskID(disk, self.target_node)

      # Now that the new lvs have the old name, we can add them to the device
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
                                                  new_lvs)
      msg = result.fail_msg
      if msg:
        for new_lv in new_lvs:
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
                                               new_lv).fail_msg
          if msg2:
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                               hint=("cleanup manually the unused logical"
                                     " volumes"))
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)

      dev.children = new_lvs

      self.cfg.Update(self.instance, feedback_fn)

    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release both node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
                    names=[self.target_node, self.other_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)

  def _ExecDrbd8Secondary(self, feedback_fn):
    """Replace the secondary node for DRBD 8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.instance.primary_node])
    self._CheckVolumeGroup([self.instance.primary_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.instance.primary_node, True, True)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                      (self.new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    # Step 4: drbd minors and drbd setup changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
                                        self.instance.name)
    logging.debug("Allocated minors %r", minors)

    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("Activating a new drbd on %s for disk/%d" %
                      (self.new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the latter activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        assert self.instance.primary_node == o_node2, "Three-node instance?"
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size)
      try:
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
                              _GetInstanceInfoText(self.instance), False)
      except errors.GenericError:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise

    # We have new devices, shutdown the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
      self.cfg.SetDiskID(dev, self.target_node)
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please cleanup this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
                                               self.node_secondary_ip,
                                               self.instance.disks)\
                                              [self.instance.primary_node]

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      self.cfg.SetDiskID(dev, self.instance.primary_node)

    self.cfg.Update(self.instance, feedback_fn)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
                                           self.instance.disks,
                                           self.instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           to_node, msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))
    cstep = 5
    if self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      cstep += 1
      self._RemoveOldStorage(self.target_node, iv_names)
      # WARNING: we release all node locks here, do not do other RPCs
      # than WaitForSync to the primary node
      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
                    names=[self.instance.primary_node,
                           self.target_node,
                           self.new_node])

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep, steps_total, "Sync devices")
    cstep += 1
    _WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node, iv_names)


class LURepairNodeStorage(NoHooksLU):
  """Repairs the volume group on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    if (constants.SO_FIX_CONSISTENCY not in
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " repaired" % storage_type,
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def _CheckFaultyDisks(self, instance, node_name):
    """Ensure faulty disks abort the opcode or at least warn."""
    try:
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
                                  node_name, True):
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
                                   " node '%s'" % (instance.name, node_name),
                                   errors.ECODE_STATE)
    except errors.OpPrereqError, err:
      if self.op.ignore_consistency:
        self.proc.LogWarning(str(err.args[0]))
      else:
        raise

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # Check whether any instance on this node has faulty disks
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
      if not inst.admin_up:
        continue
      check_nodes = set(inst.all_nodes)
      check_nodes.discard(self.op.node_name)
      for inst_node_name in check_nodes:
        self._CheckFaultyDisks(inst, inst_node_name)

  def Exec(self, feedback_fn):
    feedback_fn("Repairing storage unit '%s' on %s ..." %
                (self.op.name, self.op.node_name))

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_execute(self.op.node_name,
                                           self.op.storage_type, st_args,
                                           self.op.name,
                                           constants.SO_FIX_CONSISTENCY)
    result.Raise("Failed to repair storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


class LUNodeEvacStrategy(NoHooksLU):
  """Computes the node evacuation strategy.

  """
  REQ_BGL = False

  def CheckArguments(self):
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")

  def ExpandNames(self):
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
    self.needed_locks = locks = {}
    if self.op.remote_node is None:
      locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]

  def Exec(self, feedback_fn):
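    # With an explicit remote node, every secondary instance on the evacuated
    # nodes is simply mapped to that node; otherwise the iallocator is asked
    # for a multi-evacuation solution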
    if self.op.remote_node is not None:
9548
      instances = []
9549
      for node in self.op.nodes:
9550
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
9551
      result = []
9552
      for i in instances:
9553
        if i.primary_node == self.op.remote_node:
9554
          raise errors.OpPrereqError("Node %s is the primary node of"
9555
                                     " instance %s, cannot use it as"
9556
                                     " secondary" %
9557
                                     (self.op.remote_node, i.name),
9558
                                     errors.ECODE_INVAL)
9559
        result.append([i.name, self.op.remote_node])
9560
    else:
9561
      ial = IAllocator(self.cfg, self.rpc,
9562
                       mode=constants.IALLOCATOR_MODE_MEVAC,
9563
                       evac_nodes=self.op.nodes)
9564
      ial.Run(self.op.iallocator, validate=True)
9565
      if not ial.success:
9566
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
9567
                                 errors.ECODE_NORES)
9568
      result = ial.result
9569
    return result
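  # Illustrative example (added for clarity, not in the original source): in
  # the fixed remote_node case the list built above looks like
  #
  #   [["instance1.example.com", "node3.example.com"],
  #    ["instance2.example.com", "node3.example.com"]]
  #
  # i.e. one [instance_name, new_secondary] pair per secondary instance of
  # the evacuated nodes; in the iallocator case the allocator's own result
  # field is returned unchanged.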


class LUInstanceGrowDisk(LogicalUnit):
9573
  """Grow a disk of an instance.
9574

9575
  """
9576
  HPATH = "disk-grow"
9577
  HTYPE = constants.HTYPE_INSTANCE
9578
  REQ_BGL = False
9579

    
9580
  def ExpandNames(self):
9581
    self._ExpandAndLockInstance()
9582
    self.needed_locks[locking.LEVEL_NODE] = []
9583
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9584

    
9585
  def DeclareLocks(self, level):
9586
    if level == locking.LEVEL_NODE:
9587
      self._LockInstancesNodes()
9588

    
9589
  def BuildHooksEnv(self):
9590
    """Build hooks env.
9591

9592
    This runs on the master, the primary and all the secondaries.
9593

9594
    """
9595
    env = {
9596
      "DISK": self.op.disk,
9597
      "AMOUNT": self.op.amount,
9598
      }
9599
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
9600
    return env
9601

    
9602
  def BuildHooksNodes(self):
9603
    """Build hooks nodes.
9604

9605
    """
9606
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
9607
    return (nl, nl)
9608

    
9609
  def CheckPrereq(self):
9610
    """Check prerequisites.
9611

9612
    This checks that the instance is in the cluster.
9613

9614
    """
9615
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
9616
    assert instance is not None, \
9617
      "Cannot retrieve locked instance %s" % self.op.instance_name
9618
    nodenames = list(instance.all_nodes)
9619
    for node in nodenames:
9620
      _CheckNodeOnline(self, node)
9621

    
9622
    self.instance = instance
9623

    
9624
    if instance.disk_template not in constants.DTS_GROWABLE:
9625
      raise errors.OpPrereqError("Instance's disk layout does not support"
9626
                                 " growing", errors.ECODE_INVAL)
9627

    
9628
    self.disk = instance.FindDisk(self.op.disk)
9629

    
9630
    if instance.disk_template not in (constants.DT_FILE,
9631
                                      constants.DT_SHARED_FILE):
9632
      # TODO: check the free disk space for file, when that feature will be
9633
      # supported
9634
      _CheckNodesFreeDiskPerVG(self, nodenames,
9635
                               self.disk.ComputeGrowth(self.op.amount))
9636

    
9637
  def Exec(self, feedback_fn):
9638
    """Execute disk grow.
9639

9640
    """
9641
    instance = self.instance
9642
    disk = self.disk
9643

    
9644
    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
9645
    if not disks_ok:
9646
      raise errors.OpExecError("Cannot activate block device to grow")
9647

    
9648
    # First run all grow ops in dry-run mode
9649
    for node in instance.all_nodes:
9650
      self.cfg.SetDiskID(disk, node)
9651
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, True)
9652
      result.Raise("Grow request failed to node %s" % node)
9653

    
9654
    # We know that (as far as we can test) operations across different
9655
    # nodes will succeed, time to run it for real
9656
    for node in instance.all_nodes:
9657
      self.cfg.SetDiskID(disk, node)
9658
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, False)
9659
      result.Raise("Grow request failed to node %s" % node)
9660

    
9661
      # TODO: Rewrite code to work properly
9662
      # DRBD goes into sync mode for a short amount of time after executing the
9663
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
9664
      # calling "resize" in sync mode fails. Sleeping for a short amount of
9665
      # time is a work-around.
9666
      time.sleep(5)
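      # Hypothetical alternative (sketch only, not part of the original
      # code): instead of a fixed sleep one could poll the device until it
      # reports a non-degraded state, along these lines:
      #
      #   for _ in range(10):
      #     status = self.rpc.call_blockdev_find(node, disk)
      #     if not (status.payload and status.payload.is_degraded):
      #       break
      #     time.sleep(1)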
9667

    
9668
    disk.RecordGrow(self.op.amount)
9669
    self.cfg.Update(instance, feedback_fn)
9670
    if self.op.wait_for_sync:
9671
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
9672
      if disk_abort:
9673
        self.proc.LogWarning("Disk sync-ing has not returned a good"
9674
                             " status; please check the instance")
9675
      if not instance.admin_up:
9676
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
9677
    elif not instance.admin_up:
9678
      self.proc.LogWarning("Not shutting down the disk even if the instance is"
9679
                           " not supposed to be running because no wait for"
9680
                           " sync mode was requested")


class LUInstanceQueryData(NoHooksLU):
9684
  """Query runtime instance data.
9685

9686
  """
9687
  REQ_BGL = False
9688

    
9689
  def ExpandNames(self):
9690
    self.needed_locks = {}
9691

    
9692
    # Use locking if requested or when non-static information is wanted
9693
    if not (self.op.static or self.op.use_locking):
9694
      self.LogWarning("Non-static data requested, locks need to be acquired")
9695
      self.op.use_locking = True
9696

    
9697
    if self.op.instances or not self.op.use_locking:
9698
      # Expand instance names right here
9699
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
9700
    else:
9701
      # Will use acquired locks
9702
      self.wanted_names = None
9703

    
9704
    if self.op.use_locking:
9705
      self.share_locks = dict.fromkeys(locking.LEVELS, 1)
9706

    
9707
      if self.wanted_names is None:
9708
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
9709
      else:
9710
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
9711

    
9712
      self.needed_locks[locking.LEVEL_NODE] = []
9713
      self.share_locks = dict.fromkeys(locking.LEVELS, 1)
9714
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
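      # Added summary (not part of the original code): a static query for
      # explicitly named instances therefore takes no locks at all, while a
      # live query shares the instance locks (ALL_SET when no names were
      # given) and picks up the node locks later in DeclareLocks via
      # _LockInstancesNodes().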
9715

    
9716
  def DeclareLocks(self, level):
9717
    if self.op.use_locking and level == locking.LEVEL_NODE:
9718
      self._LockInstancesNodes()
9719

    
9720
  def CheckPrereq(self):
9721
    """Check prerequisites.
9722

9723
    This only checks the optional instance list against the existing names.
9724

9725
    """
9726
    if self.wanted_names is None:
9727
      assert self.op.use_locking, "Locking was not used"
9728
      self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
9729

    
9730
    self.wanted_instances = [self.cfg.GetInstanceInfo(name)
9731
                             for name in self.wanted_names]
9732

    
9733
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
9734
    """Returns the status of a block device
9735

9736
    """
9737
    if self.op.static or not node:
9738
      return None
9739

    
9740
    self.cfg.SetDiskID(dev, node)
9741

    
9742
    result = self.rpc.call_blockdev_find(node, dev)
9743
    if result.offline:
9744
      return None
9745

    
9746
    result.Raise("Can't compute disk status for %s" % instance_name)
9747

    
9748
    status = result.payload
9749
    if status is None:
9750
      return None
9751

    
9752
    return (status.dev_path, status.major, status.minor,
9753
            status.sync_percent, status.estimated_time,
9754
            status.is_degraded, status.ldisk_status)
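    # Illustrative example (added, not from the original source): for a DRBD
    # disk the tuple above could look roughly like
    #
    #   ("/dev/drbd0", 147, 0, 99.5, 30, False, constants.LDS_OKAY)
    #
    # i.e. device path, major/minor numbers, sync percentage, estimated time
    # in seconds, the degraded flag and the local-disk status (the exact
    # LDS_* constant name is assumed here).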
9755

    
9756
  def _ComputeDiskStatus(self, instance, snode, dev):
9757
    """Compute block device status.
9758

9759
    """
9760
    if dev.dev_type in constants.LDS_DRBD:
9761
      # we change the snode then (otherwise we use the one passed in)
9762
      if dev.logical_id[0] == instance.primary_node:
9763
        snode = dev.logical_id[1]
9764
      else:
9765
        snode = dev.logical_id[0]
9766

    
9767
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
9768
                                              instance.name, dev)
9769
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
9770

    
9771
    if dev.children:
9772
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
9773
                      for child in dev.children]
9774
    else:
9775
      dev_children = []
9776

    
9777
    return {
9778
      "iv_name": dev.iv_name,
9779
      "dev_type": dev.dev_type,
9780
      "logical_id": dev.logical_id,
9781
      "physical_id": dev.physical_id,
9782
      "pstatus": dev_pstatus,
9783
      "sstatus": dev_sstatus,
9784
      "children": dev_children,
9785
      "mode": dev.mode,
9786
      "size": dev.size,
9787
      }
9788

    
9789
  def Exec(self, feedback_fn):
9790
    """Gather and return data"""
9791
    result = {}
9792

    
9793
    cluster = self.cfg.GetClusterInfo()
9794

    
9795
    for instance in self.wanted_instances:
9796
      if not self.op.static:
9797
        remote_info = self.rpc.call_instance_info(instance.primary_node,
9798
                                                  instance.name,
9799
                                                  instance.hypervisor)
9800
        remote_info.Raise("Error checking node %s" % instance.primary_node)
9801
        remote_info = remote_info.payload
9802
        if remote_info and "state" in remote_info:
9803
          remote_state = "up"
9804
        else:
9805
          remote_state = "down"
9806
      else:
9807
        remote_state = None
9808
      if instance.admin_up:
9809
        config_state = "up"
9810
      else:
9811
        config_state = "down"
9812

    
9813
      disks = [self._ComputeDiskStatus(instance, None, device)
9814
               for device in instance.disks]
9815

    
9816
      result[instance.name] = {
9817
        "name": instance.name,
9818
        "config_state": config_state,
9819
        "run_state": remote_state,
9820
        "pnode": instance.primary_node,
9821
        "snodes": instance.secondary_nodes,
9822
        "os": instance.os,
9823
        # this happens to be the same format used for hooks
9824
        "nics": _NICListToTuple(self, instance.nics),
9825
        "disk_template": instance.disk_template,
9826
        "disks": disks,
9827
        "hypervisor": instance.hypervisor,
9828
        "network_port": instance.network_port,
9829
        "hv_instance": instance.hvparams,
9830
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
9831
        "be_instance": instance.beparams,
9832
        "be_actual": cluster.FillBE(instance),
9833
        "os_instance": instance.osparams,
9834
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
9835
        "serial_no": instance.serial_no,
9836
        "mtime": instance.mtime,
9837
        "ctime": instance.ctime,
9838
        "uuid": instance.uuid,
9839
        }
9840

    
9841
    return result


class LUInstanceSetParams(LogicalUnit):
9845
  """Modifies an instances's parameters.
9846

9847
  """
9848
  HPATH = "instance-modify"
9849
  HTYPE = constants.HTYPE_INSTANCE
9850
  REQ_BGL = False
9851

    
9852
  def CheckArguments(self):
9853
    if not (self.op.nics or self.op.disks or self.op.disk_template or
9854
            self.op.hvparams or self.op.beparams or self.op.os_name):
9855
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
9856

    
9857
    if self.op.hvparams:
9858
      _CheckGlobalHvParams(self.op.hvparams)
9859

    
9860
    # Disk validation
9861
    disk_addremove = 0
9862
    for disk_op, disk_dict in self.op.disks:
9863
      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
9864
      if disk_op == constants.DDM_REMOVE:
9865
        disk_addremove += 1
9866
        continue
9867
      elif disk_op == constants.DDM_ADD:
9868
        disk_addremove += 1
9869
      else:
9870
        if not isinstance(disk_op, int):
9871
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
9872
        if not isinstance(disk_dict, dict):
9873
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
9874
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9875

    
9876
      if disk_op == constants.DDM_ADD:
9877
        mode = disk_dict.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
9878
        if mode not in constants.DISK_ACCESS_SET:
9879
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
9880
                                     errors.ECODE_INVAL)
9881
        size = disk_dict.get(constants.IDISK_SIZE, None)
9882
        if size is None:
9883
          raise errors.OpPrereqError("Required disk parameter size missing",
9884
                                     errors.ECODE_INVAL)
9885
        try:
9886
          size = int(size)
9887
        except (TypeError, ValueError), err:
9888
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
9889
                                     str(err), errors.ECODE_INVAL)
9890
        disk_dict[constants.IDISK_SIZE] = size
9891
      else:
9892
        # modification of disk
9893
        if constants.IDISK_SIZE in disk_dict:
9894
          raise errors.OpPrereqError("Disk size change not possible, use"
9895
                                     " grow-disk", errors.ECODE_INVAL)
9896

    
9897
    if disk_addremove > 1:
9898
      raise errors.OpPrereqError("Only one disk add or remove operation"
9899
                                 " supported at a time", errors.ECODE_INVAL)
9900

    
9901
    if self.op.disks and self.op.disk_template is not None:
9902
      raise errors.OpPrereqError("Disk template conversion and other disk"
9903
                                 " changes not supported at the same time",
9904
                                 errors.ECODE_INVAL)
9905

    
9906
    if (self.op.disk_template and
9907
        self.op.disk_template in constants.DTS_INT_MIRROR and
9908
        self.op.remote_node is None):
9909
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
9910
                                 " one requires specifying a secondary node",
9911
                                 errors.ECODE_INVAL)
9912

    
9913
    # NIC validation
9914
    nic_addremove = 0
9915
    for nic_op, nic_dict in self.op.nics:
9916
      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
9917
      if nic_op == constants.DDM_REMOVE:
9918
        nic_addremove += 1
9919
        continue
9920
      elif nic_op == constants.DDM_ADD:
9921
        nic_addremove += 1
9922
      else:
9923
        if not isinstance(nic_op, int):
9924
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
9925
        if not isinstance(nic_dict, dict):
9926
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
9927
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
9928

    
9929
      # nic_dict should be a dict
9930
      nic_ip = nic_dict.get(constants.INIC_IP, None)
9931
      if nic_ip is not None:
9932
        if nic_ip.lower() == constants.VALUE_NONE:
9933
          nic_dict[constants.INIC_IP] = None
9934
        else:
9935
          if not netutils.IPAddress.IsValid(nic_ip):
9936
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
9937
                                       errors.ECODE_INVAL)
9938

    
9939
      nic_bridge = nic_dict.get('bridge', None)
9940
      nic_link = nic_dict.get(constants.INIC_LINK, None)
9941
      if nic_bridge and nic_link:
9942
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
9943
                                   " at the same time", errors.ECODE_INVAL)
9944
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
9945
        nic_dict['bridge'] = None
9946
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
9947
        nic_dict[constants.INIC_LINK] = None
9948

    
9949
      if nic_op == constants.DDM_ADD:
9950
        nic_mac = nic_dict.get(constants.INIC_MAC, None)
9951
        if nic_mac is None:
9952
          nic_dict[constants.INIC_MAC] = constants.VALUE_AUTO
9953

    
9954
      if constants.INIC_MAC in nic_dict:
9955
        nic_mac = nic_dict[constants.INIC_MAC]
9956
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
9957
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
9958

    
9959
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
9960
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
9961
                                     " modifying an existing nic",
9962
                                     errors.ECODE_INVAL)
9963

    
9964
    if nic_addremove > 1:
9965
      raise errors.OpPrereqError("Only one NIC add or remove operation"
9966
                                 " supported at a time", errors.ECODE_INVAL)
9967

    
9968
  def ExpandNames(self):
9969
    self._ExpandAndLockInstance()
9970
    self.needed_locks[locking.LEVEL_NODE] = []
9971
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9972

    
9973
  def DeclareLocks(self, level):
9974
    if level == locking.LEVEL_NODE:
9975
      self._LockInstancesNodes()
9976
      if self.op.disk_template and self.op.remote_node:
9977
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9978
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
9979

    
9980
  def BuildHooksEnv(self):
9981
    """Build hooks env.
9982

9983
    This runs on the master, primary and secondaries.
9984

9985
    """
9986
    args = dict()
9987
    if constants.BE_MEMORY in self.be_new:
9988
      args['memory'] = self.be_new[constants.BE_MEMORY]
9989
    if constants.BE_VCPUS in self.be_new:
9990
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
9991
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
9992
    # information at all.
9993
    if self.op.nics:
9994
      args['nics'] = []
9995
      nic_override = dict(self.op.nics)
9996
      for idx, nic in enumerate(self.instance.nics):
9997
        if idx in nic_override:
9998
          this_nic_override = nic_override[idx]
9999
        else:
10000
          this_nic_override = {}
10001
        if constants.INIC_IP in this_nic_override:
10002
          ip = this_nic_override[constants.INIC_IP]
10003
        else:
10004
          ip = nic.ip
10005
        if constants.INIC_MAC in this_nic_override:
10006
          mac = this_nic_override[constants.INIC_MAC]
10007
        else:
10008
          mac = nic.mac
10009
        if idx in self.nic_pnew:
10010
          nicparams = self.nic_pnew[idx]
10011
        else:
10012
          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
10013
        mode = nicparams[constants.NIC_MODE]
10014
        link = nicparams[constants.NIC_LINK]
10015
        args['nics'].append((ip, mac, mode, link))
10016
      if constants.DDM_ADD in nic_override:
10017
        ip = nic_override[constants.DDM_ADD].get(constants.INIC_IP, None)
10018
        mac = nic_override[constants.DDM_ADD][constants.INIC_MAC]
10019
        nicparams = self.nic_pnew[constants.DDM_ADD]
10020
        mode = nicparams[constants.NIC_MODE]
10021
        link = nicparams[constants.NIC_LINK]
10022
        args['nics'].append((ip, mac, mode, link))
10023
      elif constants.DDM_REMOVE in nic_override:
10024
        del args['nics'][-1]
10025

    
10026
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
10027
    if self.op.disk_template:
10028
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
10029

    
10030
    return env
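    # Illustrative example (added, not from the original source): each entry
    # appended to args['nics'] above is an (ip, mac, mode, link) tuple, e.g.
    #
    #   ("192.0.2.10", "aa:00:00:35:d9:10", constants.NIC_MODE_BRIDGED,
    #    "xen-br0")
    #
    # which the instance hook environment builder presumably expands into the
    # per-NIC hook variables.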
10031

    
10032
  def BuildHooksNodes(self):
10033
    """Build hooks nodes.
10034

10035
    """
10036
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
10037
    return (nl, nl)
10038

    
10039
  def CheckPrereq(self):
10040
    """Check prerequisites.
10041

10042
    This only checks the instance list against the existing names.
10043

10044
    """
10045
    # checking the new params on the primary/secondary nodes
10046

    
10047
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
10048
    cluster = self.cluster = self.cfg.GetClusterInfo()
10049
    assert self.instance is not None, \
10050
      "Cannot retrieve locked instance %s" % self.op.instance_name
10051
    pnode = instance.primary_node
10052
    nodelist = list(instance.all_nodes)
10053

    
10054
    # OS change
10055
    if self.op.os_name and not self.op.force:
10056
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
10057
                      self.op.force_variant)
10058
      instance_os = self.op.os_name
10059
    else:
10060
      instance_os = instance.os
10061

    
10062
    if self.op.disk_template:
10063
      if instance.disk_template == self.op.disk_template:
10064
        raise errors.OpPrereqError("Instance already has disk template %s" %
10065
                                   instance.disk_template, errors.ECODE_INVAL)
10066

    
10067
      if (instance.disk_template,
10068
          self.op.disk_template) not in self._DISK_CONVERSIONS:
10069
        raise errors.OpPrereqError("Unsupported disk template conversion from"
10070
                                   " %s to %s" % (instance.disk_template,
10071
                                                  self.op.disk_template),
10072
                                   errors.ECODE_INVAL)
10073
      _CheckInstanceDown(self, instance, "cannot change disk template")
10074
      if self.op.disk_template in constants.DTS_INT_MIRROR:
10075
        if self.op.remote_node == pnode:
10076
          raise errors.OpPrereqError("Given new secondary node %s is the same"
10077
                                     " as the primary node of the instance" %
10078
                                     self.op.remote_node, errors.ECODE_STATE)
10079
        _CheckNodeOnline(self, self.op.remote_node)
10080
        _CheckNodeNotDrained(self, self.op.remote_node)
10081
        # FIXME: here we assume that the old instance type is DT_PLAIN
10082
        assert instance.disk_template == constants.DT_PLAIN
10083
        disks = [{constants.IDISK_SIZE: d.size,
10084
                  constants.IDISK_VG: d.logical_id[0]}
10085
                 for d in instance.disks]
10086
        required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
10087
        _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
10088

    
10089
    # hvparams processing
10090
    if self.op.hvparams:
10091
      hv_type = instance.hypervisor
10092
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
10093
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
10094
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
10095

    
10096
      # local check
10097
      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
10098
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
10099
      self.hv_new = hv_new # the new actual values
10100
      self.hv_inst = i_hvdict # the new dict (without defaults)
10101
    else:
10102
      self.hv_new = self.hv_inst = {}
10103

    
10104
    # beparams processing
10105
    if self.op.beparams:
10106
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
10107
                                   use_none=True)
10108
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
10109
      be_new = cluster.SimpleFillBE(i_bedict)
10110
      self.be_new = be_new # the new actual values
10111
      self.be_inst = i_bedict # the new dict (without defaults)
10112
    else:
10113
      self.be_new = self.be_inst = {}
10114
    be_old = cluster.FillBE(instance)
10115

    
10116
    # osparams processing
10117
    if self.op.osparams:
10118
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
10119
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
10120
      self.os_inst = i_osdict # the new dict (without defaults)
10121
    else:
10122
      self.os_inst = {}
10123

    
10124
    self.warn = []
10125

    
10126
    if (constants.BE_MEMORY in self.op.beparams and not self.op.force and
10127
        be_new[constants.BE_MEMORY] > be_old[constants.BE_MEMORY]):
10128
      mem_check_list = [pnode]
10129
      if be_new[constants.BE_AUTO_BALANCE]:
10130
        # either we changed auto_balance to yes or it was from before
10131
        mem_check_list.extend(instance.secondary_nodes)
10132
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
10133
                                                  instance.hypervisor)
10134
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
10135
                                         instance.hypervisor)
10136
      pninfo = nodeinfo[pnode]
10137
      msg = pninfo.fail_msg
10138
      if msg:
10139
        # Assume the primary node is unreachable and go ahead
10140
        self.warn.append("Can't get info from primary node %s: %s" %
10141
                         (pnode,  msg))
10142
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
10143
        self.warn.append("Node data from primary node %s doesn't contain"
10144
                         " free memory information" % pnode)
10145
      elif instance_info.fail_msg:
10146
        self.warn.append("Can't get instance runtime information: %s" %
10147
                        instance_info.fail_msg)
10148
      else:
10149
        if instance_info.payload:
10150
          current_mem = int(instance_info.payload['memory'])
10151
        else:
10152
          # Assume instance not running
10153
          # (there is a slight race condition here, but it's not very probable,
10154
          # and we have no other way to check)
10155
          current_mem = 0
10156
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
10157
                    pninfo.payload['memory_free'])
10158
        if miss_mem > 0:
10159
          raise errors.OpPrereqError("This change will prevent the instance"
10160
                                     " from starting, due to %d MB of memory"
10161
                                     " missing on its primary node" % miss_mem,
10162
                                     errors.ECODE_NORES)
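        # Worked example (added for clarity): with a requested memory of
        # 4096 MB, an instance currently using 1024 MB and 2048 MB reported
        # free on the primary node, miss_mem = 4096 - 1024 - 2048 = 1024 > 0,
        # so the change is refused with ECODE_NORES.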
10163

    
10164
      if be_new[constants.BE_AUTO_BALANCE]:
10165
        for node, nres in nodeinfo.items():
10166
          if node not in instance.secondary_nodes:
10167
            continue
10168
          nres.Raise("Can't get info from secondary node %s" % node,
10169
                     prereq=True, ecode=errors.ECODE_STATE)
10170
          if not isinstance(nres.payload.get('memory_free', None), int):
10171
            raise errors.OpPrereqError("Secondary node %s didn't return free"
10172
                                       " memory information" % node,
10173
                                       errors.ECODE_STATE)
10174
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
10175
            raise errors.OpPrereqError("This change will prevent the instance"
10176
                                       " from failover to its secondary node"
10177
                                       " %s, due to not enough memory" % node,
10178
                                       errors.ECODE_STATE)
10179

    
10180
    # NIC processing
10181
    self.nic_pnew = {}
10182
    self.nic_pinst = {}
10183
    for nic_op, nic_dict in self.op.nics:
10184
      if nic_op == constants.DDM_REMOVE:
10185
        if not instance.nics:
10186
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
10187
                                     errors.ECODE_INVAL)
10188
        continue
10189
      if nic_op != constants.DDM_ADD:
10190
        # an existing nic
10191
        if not instance.nics:
10192
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
10193
                                     " no NICs" % nic_op,
10194
                                     errors.ECODE_INVAL)
10195
        if nic_op < 0 or nic_op >= len(instance.nics):
10196
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
10197
                                     " are 0 to %d" %
10198
                                     (nic_op, len(instance.nics) - 1),
10199
                                     errors.ECODE_INVAL)
10200
        old_nic_params = instance.nics[nic_op].nicparams
10201
        old_nic_ip = instance.nics[nic_op].ip
10202
      else:
10203
        old_nic_params = {}
10204
        old_nic_ip = None
10205

    
10206
      update_params_dict = dict([(key, nic_dict[key])
10207
                                 for key in constants.NICS_PARAMETERS
10208
                                 if key in nic_dict])
10209

    
10210
      if 'bridge' in nic_dict:
10211
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
10212

    
10213
      new_nic_params = _GetUpdatedParams(old_nic_params,
10214
                                         update_params_dict)
10215
      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
10216
      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
10217
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
10218
      self.nic_pinst[nic_op] = new_nic_params
10219
      self.nic_pnew[nic_op] = new_filled_nic_params
10220
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
10221

    
10222
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
10223
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
10224
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
10225
        if msg:
10226
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
10227
          if self.op.force:
10228
            self.warn.append(msg)
10229
          else:
10230
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
10231
      if new_nic_mode == constants.NIC_MODE_ROUTED:
10232
        if constants.INIC_IP in nic_dict:
10233
          nic_ip = nic_dict[constants.INIC_IP]
10234
        else:
10235
          nic_ip = old_nic_ip
10236
        if nic_ip is None:
10237
          raise errors.OpPrereqError('Cannot set the nic ip to None'
10238
                                     ' on a routed nic', errors.ECODE_INVAL)
10239
      if constants.INIC_MAC in nic_dict:
10240
        nic_mac = nic_dict[constants.INIC_MAC]
10241
        if nic_mac is None:
10242
          raise errors.OpPrereqError('Cannot set the nic mac to None',
10243
                                     errors.ECODE_INVAL)
10244
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
10245
          # otherwise generate the mac
10246
          nic_dict[constants.INIC_MAC] = \
10247
            self.cfg.GenerateMAC(self.proc.GetECId())
10248
        else:
10249
          # or validate/reserve the current one
10250
          try:
10251
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
10252
          except errors.ReservationError:
10253
            raise errors.OpPrereqError("MAC address %s already in use"
10254
                                       " in cluster" % nic_mac,
10255
                                       errors.ECODE_NOTUNIQUE)
10256

    
10257
    # DISK processing
10258
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
10259
      raise errors.OpPrereqError("Disk operations not supported for"
10260
                                 " diskless instances",
10261
                                 errors.ECODE_INVAL)
10262
    for disk_op, _ in self.op.disks:
10263
      if disk_op == constants.DDM_REMOVE:
10264
        if len(instance.disks) == 1:
10265
          raise errors.OpPrereqError("Cannot remove the last disk of"
10266
                                     " an instance", errors.ECODE_INVAL)
10267
        _CheckInstanceDown(self, instance, "cannot remove disks")
10268

    
10269
      if (disk_op == constants.DDM_ADD and
10270
          len(instance.disks) >= constants.MAX_DISKS):
10271
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
10272
                                   " add more" % constants.MAX_DISKS,
10273
                                   errors.ECODE_STATE)
10274
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
10275
        # an existing disk
10276
        if disk_op < 0 or disk_op >= len(instance.disks):
10277
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
10278
                                     " are 0 to %d" %
10279
                                     (disk_op, len(instance.disks) - 1),
10280
                                     errors.ECODE_INVAL)
10281

    
10282
    return
10283

    
10284
  def _ConvertPlainToDrbd(self, feedback_fn):
10285
    """Converts an instance from plain to drbd.
10286

10287
    """
10288
    feedback_fn("Converting template to drbd")
10289
    instance = self.instance
10290
    pnode = instance.primary_node
10291
    snode = self.op.remote_node
10292

    
10293
    # create a fake disk info for _GenerateDiskTemplate
10294
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
10295
                  constants.IDISK_VG: d.logical_id[0]}
10296
                 for d in instance.disks]
10297
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
10298
                                      instance.name, pnode, [snode],
10299
                                      disk_info, None, None, 0, feedback_fn)
10300
    info = _GetInstanceInfoText(instance)
10301
    feedback_fn("Creating aditional volumes...")
10302
    # first, create the missing data and meta devices
10303
    for disk in new_disks:
10304
      # unfortunately this is... not too nice
10305
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
10306
                            info, True)
10307
      for child in disk.children:
10308
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
10309
    # at this stage, all new LVs have been created, we can rename the
10310
    # old ones
10311
    feedback_fn("Renaming original volumes...")
10312
    rename_list = [(o, n.children[0].logical_id)
10313
                   for (o, n) in zip(instance.disks, new_disks)]
10314
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
10315
    result.Raise("Failed to rename original LVs")
10316

    
10317
    feedback_fn("Initializing DRBD devices...")
10318
    # all child devices are in place, we can now create the DRBD devices
10319
    for disk in new_disks:
10320
      for node in [pnode, snode]:
10321
        f_create = node == pnode
10322
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
10323

    
10324
    # at this point, the instance has been modified
10325
    instance.disk_template = constants.DT_DRBD8
10326
    instance.disks = new_disks
10327
    self.cfg.Update(instance, feedback_fn)
10328

    
10329
    # disks are created, waiting for sync
10330
    disk_abort = not _WaitForSync(self, instance,
10331
                                  oneshot=not self.op.wait_for_sync)
10332
    if disk_abort:
10333
      raise errors.OpExecError("There are some degraded disks for"
10334
                               " this instance, please cleanup manually")
10335

    
10336
  def _ConvertDrbdToPlain(self, feedback_fn):
10337
    """Converts an instance from drbd to plain.
10338

10339
    """
10340
    instance = self.instance
10341
    assert len(instance.secondary_nodes) == 1
10342
    pnode = instance.primary_node
10343
    snode = instance.secondary_nodes[0]
10344
    feedback_fn("Converting template to plain")
10345

    
10346
    old_disks = instance.disks
10347
    new_disks = [d.children[0] for d in old_disks]
10348

    
10349
    # copy over size and mode
10350
    for parent, child in zip(old_disks, new_disks):
10351
      child.size = parent.size
10352
      child.mode = parent.mode
10353

    
10354
    # update instance structure
10355
    instance.disks = new_disks
10356
    instance.disk_template = constants.DT_PLAIN
10357
    self.cfg.Update(instance, feedback_fn)
10358

    
10359
    feedback_fn("Removing volumes on the secondary node...")
10360
    for disk in old_disks:
10361
      self.cfg.SetDiskID(disk, snode)
10362
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
10363
      if msg:
10364
        self.LogWarning("Could not remove block device %s on node %s,"
10365
                        " continuing anyway: %s", disk.iv_name, snode, msg)
10366

    
10367
    feedback_fn("Removing unneeded volumes on the primary node...")
10368
    for idx, disk in enumerate(old_disks):
10369
      meta = disk.children[1]
10370
      self.cfg.SetDiskID(meta, pnode)
10371
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
10372
      if msg:
10373
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
10374
                        " continuing anyway: %s", idx, pnode, msg)
10375

    
10376
  def Exec(self, feedback_fn):
10377
    """Modifies an instance.
10378

10379
    All parameters take effect only at the next restart of the instance.
10380

10381
    """
10382
    # Process here the warnings from CheckPrereq, as we don't have a
10383
    # feedback_fn there.
10384
    for warn in self.warn:
10385
      feedback_fn("WARNING: %s" % warn)
10386

    
10387
    result = []
10388
    instance = self.instance
10389
    # disk changes
10390
    for disk_op, disk_dict in self.op.disks:
10391
      if disk_op == constants.DDM_REMOVE:
10392
        # remove the last disk
10393
        device = instance.disks.pop()
10394
        device_idx = len(instance.disks)
10395
        for node, disk in device.ComputeNodeTree(instance.primary_node):
10396
          self.cfg.SetDiskID(disk, node)
10397
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
10398
          if msg:
10399
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
10400
                            " continuing anyway", device_idx, node, msg)
10401
        result.append(("disk/%d" % device_idx, "remove"))
10402
      elif disk_op == constants.DDM_ADD:
10403
        # add a new disk
10404
        if instance.disk_template in (constants.DT_FILE,
10405
                                        constants.DT_SHARED_FILE):
10406
          file_driver, file_path = instance.disks[0].logical_id
10407
          file_path = os.path.dirname(file_path)
10408
        else:
10409
          file_driver = file_path = None
10410
        disk_idx_base = len(instance.disks)
10411
        new_disk = _GenerateDiskTemplate(self,
10412
                                         instance.disk_template,
10413
                                         instance.name, instance.primary_node,
10414
                                         instance.secondary_nodes,
10415
                                         [disk_dict],
10416
                                         file_path,
10417
                                         file_driver,
10418
                                         disk_idx_base, feedback_fn)[0]
10419
        instance.disks.append(new_disk)
10420
        info = _GetInstanceInfoText(instance)
10421

    
10422
        logging.info("Creating volume %s for instance %s",
10423
                     new_disk.iv_name, instance.name)
10424
        # Note: this needs to be kept in sync with _CreateDisks
10425
        #HARDCODE
10426
        for node in instance.all_nodes:
10427
          f_create = node == instance.primary_node
10428
          try:
10429
            _CreateBlockDev(self, node, instance, new_disk,
10430
                            f_create, info, f_create)
10431
          except errors.OpExecError, err:
10432
            self.LogWarning("Failed to create volume %s (%s) on"
10433
                            " node %s: %s",
10434
                            new_disk.iv_name, new_disk, node, err)
10435
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
10436
                       (new_disk.size, new_disk.mode)))
10437
      else:
10438
        # change a given disk
10439
        instance.disks[disk_op].mode = disk_dict[constants.IDISK_MODE]
10440
        result.append(("disk.mode/%d" % disk_op,
10441
                       disk_dict[constants.IDISK_MODE]))
10442

    
10443
    if self.op.disk_template:
10444
      r_shut = _ShutdownInstanceDisks(self, instance)
10445
      if not r_shut:
10446
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
10447
                                 " proceed with disk template conversion")
10448
      mode = (instance.disk_template, self.op.disk_template)
10449
      try:
10450
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
10451
      except:
10452
        self.cfg.ReleaseDRBDMinors(instance.name)
10453
        raise
10454
      result.append(("disk_template", self.op.disk_template))
10455

    
10456
    # NIC changes
10457
    for nic_op, nic_dict in self.op.nics:
10458
      if nic_op == constants.DDM_REMOVE:
10459
        # remove the last nic
10460
        del instance.nics[-1]
10461
        result.append(("nic.%d" % len(instance.nics), "remove"))
10462
      elif nic_op == constants.DDM_ADD:
10463
        # mac and bridge should be set, by now
10464
        mac = nic_dict[constants.INIC_MAC]
10465
        ip = nic_dict.get(constants.INIC_IP, None)
10466
        nicparams = self.nic_pinst[constants.DDM_ADD]
10467
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
10468
        instance.nics.append(new_nic)
10469
        result.append(("nic.%d" % (len(instance.nics) - 1),
10470
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
10471
                       (new_nic.mac, new_nic.ip,
10472
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
10473
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
10474
                       )))
10475
      else:
10476
        for key in (constants.INIC_MAC, constants.INIC_IP):
10477
          if key in nic_dict:
10478
            setattr(instance.nics[nic_op], key, nic_dict[key])
10479
        if nic_op in self.nic_pinst:
10480
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
10481
        for key, val in nic_dict.iteritems():
10482
          result.append(("nic.%s/%d" % (key, nic_op), val))
10483

    
10484
    # hvparams changes
10485
    if self.op.hvparams:
10486
      instance.hvparams = self.hv_inst
10487
      for key, val in self.op.hvparams.iteritems():
10488
        result.append(("hv/%s" % key, val))
10489

    
10490
    # beparams changes
10491
    if self.op.beparams:
10492
      instance.beparams = self.be_inst
10493
      for key, val in self.op.beparams.iteritems():
10494
        result.append(("be/%s" % key, val))
10495

    
10496
    # OS change
10497
    if self.op.os_name:
10498
      instance.os = self.op.os_name
10499

    
10500
    # osparams changes
10501
    if self.op.osparams:
10502
      instance.osparams = self.os_inst
10503
      for key, val in self.op.osparams.iteritems():
10504
        result.append(("os/%s" % key, val))
10505

    
10506
    self.cfg.Update(instance, feedback_fn)
10507

    
10508
    return result
10509

    
10510
  _DISK_CONVERSIONS = {
10511
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
10512
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
10513
    }
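  # Added note (not part of the original code): Exec dispatches through this
  # table with mode = (instance.disk_template, self.op.disk_template), so a
  # plain -> drbd request resolves to
  #
  #   self._DISK_CONVERSIONS[(constants.DT_PLAIN, constants.DT_DRBD8)]
  #
  # i.e. _ConvertPlainToDrbd, invoked as an unbound function with
  # (self, feedback_fn); unsupported pairs are rejected in CheckPrereq.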


class LUBackupQuery(NoHooksLU):
10517
  """Query the exports list
10518

10519
  """
10520
  REQ_BGL = False
10521

    
10522
  def ExpandNames(self):
10523
    self.needed_locks = {}
10524
    self.share_locks[locking.LEVEL_NODE] = 1
10525
    if not self.op.nodes:
10526
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10527
    else:
10528
      self.needed_locks[locking.LEVEL_NODE] = \
10529
        _GetWantedNodes(self, self.op.nodes)
10530

    
10531
  def Exec(self, feedback_fn):
10532
    """Compute the list of all the exported system images.
10533

10534
    @rtype: dict
10535
    @return: a dictionary with the structure node->(export-list)
10536
        where export-list is a list of the instances exported on
10537
        that node.
10538

10539
    """
10540
    self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
10541
    rpcresult = self.rpc.call_export_list(self.nodes)
10542
    result = {}
10543
    for node in rpcresult:
10544
      if rpcresult[node].fail_msg:
10545
        result[node] = False
10546
      else:
10547
        result[node] = rpcresult[node].payload
10548

    
10549
    return result
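    # Illustrative example (added, not from the original source): the
    # returned dictionary maps node names to their export lists, with False
    # marking nodes that could not be queried, e.g.
    #
    #   {"node1.example.com": ["instance1.example.com"],
    #    "node2.example.com": False}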


class LUBackupPrepare(NoHooksLU):
10553
  """Prepares an instance for an export and returns useful information.
10554

10555
  """
10556
  REQ_BGL = False
10557

    
10558
  def ExpandNames(self):
10559
    self._ExpandAndLockInstance()
10560

    
10561
  def CheckPrereq(self):
10562
    """Check prerequisites.
10563

10564
    """
10565
    instance_name = self.op.instance_name
10566

    
10567
    self.instance = self.cfg.GetInstanceInfo(instance_name)
10568
    assert self.instance is not None, \
10569
          "Cannot retrieve locked instance %s" % self.op.instance_name
10570
    _CheckNodeOnline(self, self.instance.primary_node)
10571

    
10572
    self._cds = _GetClusterDomainSecret()
10573

    
10574
  def Exec(self, feedback_fn):
10575
    """Prepares an instance for an export.
10576

10577
    """
10578
    instance = self.instance
10579

    
10580
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
10581
      salt = utils.GenerateSecret(8)
10582

    
10583
      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
10584
      result = self.rpc.call_x509_cert_create(instance.primary_node,
10585
                                              constants.RIE_CERT_VALIDITY)
10586
      result.Raise("Can't create X509 key and certificate on %s" % result.node)
10587

    
10588
      (name, cert_pem) = result.payload
10589

    
10590
      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
10591
                                             cert_pem)
10592

    
10593
      return {
10594
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
10595
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
10596
                          salt),
10597
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
10598
        }
10599

    
10600
    return None
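    # Added note (not part of the original code): in remote mode the
    # "x509_key_name" triple returned above is (name, hmac, salt) with
    # hmac = utils.Sha1Hmac(cluster_domain_secret, name, salt=salt); the
    # subsequent LUBackupExport.CheckPrereq verifies exactly this value via
    # utils.VerifySha1Hmac before accepting the key name.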


class LUBackupExport(LogicalUnit):
10604
  """Export an instance to an image in the cluster.
10605

10606
  """
10607
  HPATH = "instance-export"
10608
  HTYPE = constants.HTYPE_INSTANCE
10609
  REQ_BGL = False
10610

    
10611
  def CheckArguments(self):
10612
    """Check the arguments.
10613

10614
    """
10615
    self.x509_key_name = self.op.x509_key_name
10616
    self.dest_x509_ca_pem = self.op.destination_x509_ca
10617

    
10618
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
10619
      if not self.x509_key_name:
10620
        raise errors.OpPrereqError("Missing X509 key name for encryption",
10621
                                   errors.ECODE_INVAL)
10622

    
10623
      if not self.dest_x509_ca_pem:
10624
        raise errors.OpPrereqError("Missing destination X509 CA",
10625
                                   errors.ECODE_INVAL)
10626

    
10627
  def ExpandNames(self):
10628
    self._ExpandAndLockInstance()
10629

    
10630
    # Lock all nodes for local exports
10631
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
10632
      # FIXME: lock only instance primary and destination node
10633
      #
10634
      # Sad but true, for now we have to lock all nodes, as we don't know where
10635
      # the previous export might be, and in this LU we search for it and
10636
      # remove it from its current node. In the future we could fix this by:
10637
      #  - making a tasklet to search (share-lock all), then create the
10638
      #    new one, then one to remove, after
10639
      #  - removing the removal operation altogether
10640
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10641

    
10642
  def DeclareLocks(self, level):
10643
    """Last minute lock declaration."""
10644
    # All nodes are locked anyway, so nothing to do here.
10645

    
10646
  def BuildHooksEnv(self):
10647
    """Build hooks env.
10648

10649
    This will run on the master, primary node and target node.
10650

10651
    """
10652
    env = {
10653
      "EXPORT_MODE": self.op.mode,
10654
      "EXPORT_NODE": self.op.target_node,
10655
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
10656
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
10657
      # TODO: Generic function for boolean env variables
10658
      "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
10659
      }
10660

    
10661
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
10662

    
10663
    return env
10664

    
10665
  def BuildHooksNodes(self):
10666
    """Build hooks nodes.
10667

10668
    """
10669
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
10670

    
10671
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
10672
      nl.append(self.op.target_node)
10673

    
10674
    return (nl, nl)
10675

    
10676
  def CheckPrereq(self):
10677
    """Check prerequisites.
10678

10679
    This checks that the instance and node names are valid.
10680

10681
    """
10682
    instance_name = self.op.instance_name
10683

    
10684
    self.instance = self.cfg.GetInstanceInfo(instance_name)
10685
    assert self.instance is not None, \
10686
          "Cannot retrieve locked instance %s" % self.op.instance_name
10687
    _CheckNodeOnline(self, self.instance.primary_node)
10688

    
10689
    if (self.op.remove_instance and self.instance.admin_up and
10690
        not self.op.shutdown):
10691
      raise errors.OpPrereqError("Can not remove instance without shutting it"
10692
                                 " down before")
10693

    
10694
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
10695
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
10696
      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
10697
      assert self.dst_node is not None
10698

    
10699
      _CheckNodeOnline(self, self.dst_node.name)
10700
      _CheckNodeNotDrained(self, self.dst_node.name)
10701

    
10702
      self._cds = None
10703
      self.dest_disk_info = None
10704
      self.dest_x509_ca = None
10705

    
10706
    elif self.op.mode == constants.EXPORT_MODE_REMOTE:
10707
      self.dst_node = None
10708

    
10709
      if len(self.op.target_node) != len(self.instance.disks):
10710
        raise errors.OpPrereqError(("Received destination information for %s"
10711
                                    " disks, but instance %s has %s disks") %
10712
                                   (len(self.op.target_node), instance_name,
10713
                                    len(self.instance.disks)),
10714
                                   errors.ECODE_INVAL)
10715

    
10716
      cds = _GetClusterDomainSecret()
10717

    
10718
      # Check X509 key name
10719
      try:
10720
        (key_name, hmac_digest, hmac_salt) = self.x509_key_name
10721
      except (TypeError, ValueError), err:
10722
        raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
10723

    
10724
      if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
10725
        raise errors.OpPrereqError("HMAC for X509 key name is wrong",
10726
                                   errors.ECODE_INVAL)
10727

    
10728
      # Load and verify CA
10729
      try:
10730
        (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
10731
      except OpenSSL.crypto.Error, err:
10732
        raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
10733
                                   (err, ), errors.ECODE_INVAL)
10734

    
10735
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
10736
      if errcode is not None:
10737
        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
10738
                                   (msg, ), errors.ECODE_INVAL)
10739

    
10740
      self.dest_x509_ca = cert
10741

    
10742
      # Verify target information
10743
      disk_info = []
10744
      for idx, disk_data in enumerate(self.op.target_node):
10745
        try:
10746
          (host, port, magic) = \
10747
            masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
10748
        except errors.GenericError, err:
10749
          raise errors.OpPrereqError("Target info for disk %s: %s" %
10750
                                     (idx, err), errors.ECODE_INVAL)
10751

    
10752
        disk_info.append((host, port, magic))
10753

    
10754
      assert len(disk_info) == len(self.op.target_node)
10755
      self.dest_disk_info = disk_info
10756

    
10757
    else:
10758
      raise errors.ProgrammerError("Unhandled export mode %r" %
10759
                                   self.op.mode)
10760

    
10761
    # instance disk type verification
10762
    # TODO: Implement export support for file-based disks
10763
    for disk in self.instance.disks:
10764
      if disk.dev_type == constants.LD_FILE:
10765
        raise errors.OpPrereqError("Export not supported for instances with"
10766
                                   " file-based disks", errors.ECODE_INVAL)
10767

    
10768
  def _CleanupExports(self, feedback_fn):
10769
    """Removes exports of current instance from all other nodes.
10770

10771
    If an instance in a cluster with nodes A..D was exported to node C, its
10772
    exports will be removed from the nodes A, B and D.
10773

10774
    """
10775
    assert self.op.mode != constants.EXPORT_MODE_REMOTE
10776

    
10777
    nodelist = self.cfg.GetNodeList()
10778
    nodelist.remove(self.dst_node.name)
10779

    
10780
    # on one-node clusters nodelist will be empty after the removal
10781
    # if we proceed the backup would be removed because OpBackupQuery
10782
    # substitutes an empty list with the full cluster node list.
10783
    iname = self.instance.name
10784
    if nodelist:
10785
      feedback_fn("Removing old exports for instance %s" % iname)
10786
      exportlist = self.rpc.call_export_list(nodelist)
10787
      for node in exportlist:
10788
        if exportlist[node].fail_msg:
10789
          continue
10790
        if iname in exportlist[node].payload:
10791
          msg = self.rpc.call_export_remove(node, iname).fail_msg
10792
          if msg:
10793
            self.LogWarning("Could not remove older export for instance %s"
10794
                            " on node %s: %s", iname, node, msg)
10795

    
10796
  def Exec(self, feedback_fn):
10797
    """Export an instance to an image in the cluster.
10798

10799
    """
10800
    assert self.op.mode in constants.EXPORT_MODES
10801

    
10802
    instance = self.instance
10803
    src_node = instance.primary_node
10804

    
10805
    if self.op.shutdown:
10806
      # shutdown the instance, but not the disks
10807
      feedback_fn("Shutting down instance %s" % instance.name)
10808
      result = self.rpc.call_instance_shutdown(src_node, instance,
10809
                                               self.op.shutdown_timeout)
10810
      # TODO: Maybe ignore failures if ignore_remove_failures is set
10811
      result.Raise("Could not shutdown instance %s on"
10812
                   " node %s" % (instance.name, src_node))
10813

    
10814
    # set the disks ID correctly since call_instance_start needs the
10815
    # correct drbd minor to create the symlinks
10816
    for disk in instance.disks:
10817
      self.cfg.SetDiskID(disk, src_node)
10818

    
10819
    activate_disks = (not instance.admin_up)
10820

    
10821
    if activate_disks:
10822
      # Activate the instance disks if we'exporting a stopped instance
10823
      feedback_fn("Activating disks for %s" % instance.name)
10824
      _StartInstanceDisks(self, instance, None)
10825

    
10826
    try:
10827
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
10828
                                                     instance)
10829

    
10830
      helper.CreateSnapshots()
10831
      try:
10832
        if (self.op.shutdown and instance.admin_up and
10833
            not self.op.remove_instance):
10834
          assert not activate_disks
10835
          feedback_fn("Starting instance %s" % instance.name)
10836
          result = self.rpc.call_instance_start(src_node, instance, None, None)
10837
          msg = result.fail_msg
10838
          if msg:
10839
            feedback_fn("Failed to start instance: %s" % msg)
10840
            _ShutdownInstanceDisks(self, instance)
10841
            raise errors.OpExecError("Could not start instance: %s" % msg)
10842

    
10843
        if self.op.mode == constants.EXPORT_MODE_LOCAL:
10844
          (fin_resu, dresults) = helper.LocalExport(self.dst_node)
10845
        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
10846
          connect_timeout = constants.RIE_CONNECT_TIMEOUT
10847
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
10848

    
10849
          (key_name, _, _) = self.x509_key_name
10850

    
10851
          dest_ca_pem = \
10852
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
10853
                                            self.dest_x509_ca)
10854

    
10855
          (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
10856
                                                     key_name, dest_ca_pem,
10857
                                                     timeouts)
10858
      finally:
10859
        helper.Cleanup()
10860

    
10861
      # Check for backwards compatibility
10862
      assert len(dresults) == len(instance.disks)
10863
      assert compat.all(isinstance(i, bool) for i in dresults), \
10864
             "Not all results are boolean: %r" % dresults
10865

    
10866
    finally:
10867
      if activate_disks:
10868
        feedback_fn("Deactivating disks for %s" % instance.name)
10869
        _ShutdownInstanceDisks(self, instance)
10870

    
10871
    if not (compat.all(dresults) and fin_resu):
10872
      failures = []
10873
      if not fin_resu:
10874
        failures.append("export finalization")
10875
      if not compat.all(dresults):
10876
        fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
10877
                               if not dsk)
10878
        failures.append("disk export: disk(s) %s" % fdsk)
10879

    
10880
      raise errors.OpExecError("Export failed, errors in %s" %
10881
                               utils.CommaJoin(failures))
10882

    
10883
    # At this point, the export was successful, we can cleanup/finish
10884

    
10885
    # Remove instance if requested
10886
    if self.op.remove_instance:
10887
      feedback_fn("Removing instance %s" % instance.name)
10888
      _RemoveInstance(self, feedback_fn, instance,
10889
                      self.op.ignore_remove_failures)
10890

    
10891
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
10892
      self._CleanupExports(feedback_fn)
10893

    
10894
    return fin_resu, dresults


class LUBackupRemove(NoHooksLU):
10898
  """Remove exports related to the named instance.
10899

10900
  """
10901
  REQ_BGL = False
10902

    
10903
  def ExpandNames(self):
10904
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can also remove exports for an already-removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
10909

    
10910
  def Exec(self, feedback_fn):
10911
    """Remove any export.
10912

10913
    """
10914
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
10915
    # If the instance was not found we'll try with the name that was passed in.
10916
    # This will only work if it was an FQDN, though.
10917
    fqdn_warn = False
10918
    if not instance_name:
10919
      fqdn_warn = True
10920
      instance_name = self.op.instance_name
10921

    
10922
    locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
10923
    exportlist = self.rpc.call_export_list(locked_nodes)
10924
    found = False
10925
    for node in exportlist:
10926
      msg = exportlist[node].fail_msg
10927
      if msg:
10928
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
10929
        continue
10930
      if instance_name in exportlist[node].payload:
10931
        found = True
10932
        result = self.rpc.call_export_remove(node, instance_name)
10933
        msg = result.fail_msg
10934
        if msg:
10935
          logging.error("Could not remove export for instance %s"
10936
                        " on node %s: %s", instance_name, node, msg)
10937

    
10938
    if fqdn_warn and not found:
10939
      feedback_fn("Export not found. If trying to remove an export belonging"
10940
                  " to a deleted instance please use its Fully Qualified"
10941
                  " Domain Name.")
10942

    
10943

    
10944
class LUGroupAdd(LogicalUnit):
10945
  """Logical unit for creating node groups.
10946

10947
  """
10948
  HPATH = "group-add"
10949
  HTYPE = constants.HTYPE_GROUP
10950
  REQ_BGL = False
10951

    
10952
  def ExpandNames(self):
10953
    # We need the new group's UUID here so that we can create and acquire the
10954
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
10955
    # that it should not check whether the UUID exists in the configuration.
10956
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
10957
    self.needed_locks = {}
10958
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
10959

    
10960
  def CheckPrereq(self):
10961
    """Check prerequisites.
10962

10963
    This checks that the given group name is not already an existing node
    group.
10965

10966
    """
10967
    try:
10968
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
10969
    except errors.OpPrereqError:
10970
      pass
10971
    else:
10972
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
10973
                                 " node group (UUID: %s)" %
10974
                                 (self.op.group_name, existing_uuid),
10975
                                 errors.ECODE_EXISTS)
10976

    
10977
    if self.op.ndparams:
10978
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
10979

    
10980
  def BuildHooksEnv(self):
10981
    """Build hooks env.
10982

10983
    """
10984
    return {
10985
      "GROUP_NAME": self.op.group_name,
10986
      }
10987

    
10988
  def BuildHooksNodes(self):
10989
    """Build hooks nodes.
10990

10991
    """
10992
    mn = self.cfg.GetMasterNode()
10993
    return ([mn], [mn])
10994

    
10995
  def Exec(self, feedback_fn):
10996
    """Add the node group to the cluster.
10997

10998
    """
10999
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
11000
                                  uuid=self.group_uuid,
11001
                                  alloc_policy=self.op.alloc_policy,
11002
                                  ndparams=self.op.ndparams)
11003

    
11004
    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
11005
    del self.remove_locks[locking.LEVEL_NODEGROUP]
11006

    
11007

    
11008
class LUGroupAssignNodes(NoHooksLU):
11009
  """Logical unit for assigning nodes to groups.
11010

11011
  """
11012
  REQ_BGL = False
11013

    
11014
  def ExpandNames(self):
11015
    # These raise errors.OpPrereqError on their own:
11016
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
11017
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
11018

    
11019
    # We want to lock all the affected nodes and groups. We have readily
11020
    # available the list of nodes, and the *destination* group. To gather the
11021
    # list of "source" groups, we need to fetch node information later on.
11022
    self.needed_locks = {
11023
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
11024
      locking.LEVEL_NODE: self.op.nodes,
11025
      }
11026

    
11027
  def DeclareLocks(self, level):
11028
    if level == locking.LEVEL_NODEGROUP:
11029
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
11030

    
11031
      # Try to get all affected nodes' groups without having the group or node
11032
      # lock yet. Needs verification later in the code flow.
11033
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
11034

    
11035
      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
11036

    
11037
  def CheckPrereq(self):
11038
    """Check prerequisites.
11039

11040
    """
11041
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
11042
    assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
11043
            frozenset(self.op.nodes))
11044

    
11045
    expected_locks = (set([self.group_uuid]) |
11046
                      self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
11047
    actual_locks = self.glm.list_owned(locking.LEVEL_NODEGROUP)
11048
    if actual_locks != expected_locks:
11049
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
11050
                               " current groups are '%s', used to be '%s'" %
11051
                               (utils.CommaJoin(expected_locks),
11052
                                utils.CommaJoin(actual_locks)))
11053

    
11054
    self.node_data = self.cfg.GetAllNodesInfo()
11055
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
11056
    instance_data = self.cfg.GetAllInstancesInfo()
11057

    
11058
    if self.group is None:
11059
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
11060
                               (self.op.group_name, self.group_uuid))
11061

    
11062
    (new_splits, previous_splits) = \
11063
      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
11064
                                             for node in self.op.nodes],
11065
                                            self.node_data, instance_data)
11066

    
11067
    if new_splits:
11068
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
11069

    
11070
      if not self.op.force:
11071
        raise errors.OpExecError("The following instances get split by this"
11072
                                 " change and --force was not given: %s" %
11073
                                 fmt_new_splits)
11074
      else:
11075
        self.LogWarning("This operation will split the following instances: %s",
11076
                        fmt_new_splits)
11077

    
11078
        if previous_splits:
11079
          self.LogWarning("In addition, these already-split instances continue"
11080
                          " to be split across groups: %s",
11081
                          utils.CommaJoin(utils.NiceSort(previous_splits)))
11082

    
11083
  def Exec(self, feedback_fn):
11084
    """Assign nodes to a new group.
11085

11086
    """
11087
    for node in self.op.nodes:
11088
      self.node_data[node].group = self.group_uuid
11089

    
11090
    # FIXME: Depends on side-effects of modifying the result of
11091
    # C{cfg.GetAllNodesInfo}
11092

    
11093
    self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.
11094

    
11095
  @staticmethod
11096
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
11097
    """Check for split instances after a node assignment.
11098

11099
    This method considers a series of node assignments as an atomic operation,
11100
    and returns information about split instances after applying the set of
11101
    changes.
11102

11103
    In particular, it returns information about newly split instances, and
11104
    instances that were already split, and remain so after the change.
11105

11106
    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
11107
    considered.
11108

11109
    @type changes: list of (node_name, new_group_uuid) pairs.
11110
    @param changes: list of node assignments to consider.
11111
    @param node_data: a dict with data for all nodes
11112
    @param instance_data: a dict with all instances to consider
11113
    @rtype: a two-tuple
    @return: a list of instances that were previously okay and end up split as
      a consequence of this change, and a list of instances that were
      previously split and that this change does not fix.
11117

11118
    """
11119
    changed_nodes = dict((node, group) for node, group in changes
11120
                         if node_data[node].group != group)
11121

    
11122
    all_split_instances = set()
11123
    previously_split_instances = set()
11124

    
11125
    def InstanceNodes(instance):
11126
      return [instance.primary_node] + list(instance.secondary_nodes)
11127

    
11128
    for inst in instance_data.values():
11129
      if inst.disk_template not in constants.DTS_INT_MIRROR:
11130
        continue
11131

    
11132
      instance_nodes = InstanceNodes(inst)
11133

    
11134
      if len(set(node_data[node].group for node in instance_nodes)) > 1:
11135
        previously_split_instances.add(inst.name)
11136

    
11137
      if len(set(changed_nodes.get(node, node_data[node].group)
11138
                 for node in instance_nodes)) > 1:
11139
        all_split_instances.add(inst.name)
11140

    
11141
    return (list(all_split_instances - previously_split_instances),
11142
            list(previously_split_instances & all_split_instances))
11143

    
11144

    
11145
class _GroupQuery(_QueryBase):
11146
  FIELDS = query.GROUP_FIELDS
11147

    
11148
  def ExpandNames(self, lu):
11149
    lu.needed_locks = {}
11150

    
11151
    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
11152
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
11153

    
11154
    if not self.names:
11155
      self.wanted = [name_to_uuid[name]
11156
                     for name in utils.NiceSort(name_to_uuid.keys())]
11157
    else:
11158
      # Accept names to be either names or UUIDs.
11159
      missing = []
11160
      self.wanted = []
11161
      all_uuid = frozenset(self._all_groups.keys())
11162

    
11163
      for name in self.names:
11164
        if name in all_uuid:
11165
          self.wanted.append(name)
11166
        elif name in name_to_uuid:
11167
          self.wanted.append(name_to_uuid[name])
11168
        else:
11169
          missing.append(name)
11170

    
11171
      if missing:
11172
        raise errors.OpPrereqError("Some groups do not exist: %s" %
11173
                                   utils.CommaJoin(missing),
11174
                                   errors.ECODE_NOENT)
11175

    
11176
  def DeclareLocks(self, lu, level):
11177
    pass
11178

    
11179
  def _GetQueryData(self, lu):
11180
    """Computes the list of node groups and their attributes.
11181

11182
    """
11183
    do_nodes = query.GQ_NODE in self.requested_data
11184
    do_instances = query.GQ_INST in self.requested_data
11185

    
11186
    group_to_nodes = None
11187
    group_to_instances = None
11188

    
11189
    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
11190
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
11191
    # latter GetAllInstancesInfo() is not enough, for we have to go through
11192
    # instance->node. Hence, we will need to process nodes even if we only need
11193
    # instance information.
11194
    if do_nodes or do_instances:
11195
      all_nodes = lu.cfg.GetAllNodesInfo()
11196
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
11197
      node_to_group = {}
11198

    
11199
      for node in all_nodes.values():
11200
        if node.group in group_to_nodes:
11201
          group_to_nodes[node.group].append(node.name)
11202
          node_to_group[node.name] = node.group
11203

    
11204
      if do_instances:
11205
        all_instances = lu.cfg.GetAllInstancesInfo()
11206
        group_to_instances = dict((uuid, []) for uuid in self.wanted)
11207

    
11208
        for instance in all_instances.values():
11209
          node = instance.primary_node
11210
          if node in node_to_group:
11211
            group_to_instances[node_to_group[node]].append(instance.name)
11212

    
11213
        if not do_nodes:
11214
          # Do not pass on node information if it was not requested.
11215
          group_to_nodes = None
11216

    
11217
    return query.GroupQueryData([self._all_groups[uuid]
11218
                                 for uuid in self.wanted],
11219
                                group_to_nodes, group_to_instances)


class LUGroupQuery(NoHooksLU):
  """Logical unit for querying node groups.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
                          self.op.output_fields, False)

  def ExpandNames(self):
    self.gq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.gq.OldStyleQuery(self)


class LUGroupSetParams(LogicalUnit):
11240
  """Modifies the parameters of a node group.
11241

11242
  """
11243
  HPATH = "group-modify"
11244
  HTYPE = constants.HTYPE_GROUP
11245
  REQ_BGL = False
11246

    
11247
  def CheckArguments(self):
11248
    all_changes = [
11249
      self.op.ndparams,
11250
      self.op.alloc_policy,
11251
      ]
11252

    
11253
    if all_changes.count(None) == len(all_changes):
11254
      raise errors.OpPrereqError("Please pass at least one modification",
11255
                                 errors.ECODE_INVAL)
11256

    
11257
  def ExpandNames(self):
11258
    # This raises errors.OpPrereqError on its own:
11259
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
11260

    
11261
    self.needed_locks = {
11262
      locking.LEVEL_NODEGROUP: [self.group_uuid],
11263
      }
11264

    
11265
  def CheckPrereq(self):
11266
    """Check prerequisites.
11267

11268
    """
11269
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
11270

    
11271
    if self.group is None:
11272
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
11273
                               (self.op.group_name, self.group_uuid))
11274

    
11275
    if self.op.ndparams:
11276
      new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
11277
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
11278
      self.new_ndparams = new_ndparams
11279

    
11280
  def BuildHooksEnv(self):
11281
    """Build hooks env.
11282

11283
    """
11284
    return {
11285
      "GROUP_NAME": self.op.group_name,
11286
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
11287
      }
11288

    
11289
  def BuildHooksNodes(self):
11290
    """Build hooks nodes.
11291

11292
    """
11293
    mn = self.cfg.GetMasterNode()
11294
    return ([mn], [mn])
11295

    
11296
  def Exec(self, feedback_fn):
11297
    """Modifies the node group.
11298

11299
    """
11300
    result = []
11301

    
11302
    if self.op.ndparams:
11303
      self.group.ndparams = self.new_ndparams
11304
      result.append(("ndparams", str(self.group.ndparams)))
11305

    
11306
    if self.op.alloc_policy:
11307
      self.group.alloc_policy = self.op.alloc_policy
11308

    
11309
    self.cfg.Update(self.group, feedback_fn)
11310
    return result
11311

    
11312

    
11313

    
11314
class LUGroupRemove(LogicalUnit):
11315
  HPATH = "group-remove"
11316
  HTYPE = constants.HTYPE_GROUP
11317
  REQ_BGL = False
11318

    
11319
  def ExpandNames(self):
11320
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
11322
    self.needed_locks = {
11323
      locking.LEVEL_NODEGROUP: [self.group_uuid],
11324
      }
11325

    
11326
  def CheckPrereq(self):
11327
    """Check prerequisites.
11328

11329
    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.
11332

11333
    """
11334
    # Verify that the group is empty.
11335
    group_nodes = [node.name
11336
                   for node in self.cfg.GetAllNodesInfo().values()
11337
                   if node.group == self.group_uuid]
11338

    
11339
    if group_nodes:
11340
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
11341
                                 " nodes: %s" %
11342
                                 (self.op.group_name,
11343
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
11344
                                 errors.ECODE_STATE)
11345

    
11346
    # Verify the cluster would not be left group-less.
11347
    if len(self.cfg.GetNodeGroupList()) == 1:
11348
      raise errors.OpPrereqError("Group '%s' is the only group,"
11349
                                 " cannot be removed" %
11350
                                 self.op.group_name,
11351
                                 errors.ECODE_STATE)
11352

    
11353
  def BuildHooksEnv(self):
11354
    """Build hooks env.
11355

11356
    """
11357
    return {
11358
      "GROUP_NAME": self.op.group_name,
11359
      }
11360

    
11361
  def BuildHooksNodes(self):
11362
    """Build hooks nodes.
11363

11364
    """
11365
    mn = self.cfg.GetMasterNode()
11366
    return ([mn], [mn])
11367

    
11368
  def Exec(self, feedback_fn):
11369
    """Remove the node group.
11370

11371
    """
11372
    try:
11373
      self.cfg.RemoveNodeGroup(self.group_uuid)
11374
    except errors.ConfigurationError:
11375
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
11376
                               (self.op.group_name, self.group_uuid))
11377

    
11378
    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
11379

    
11380

    
11381
class LUGroupRename(LogicalUnit):
11382
  HPATH = "group-rename"
11383
  HTYPE = constants.HTYPE_GROUP
11384
  REQ_BGL = False
11385

    
11386
  def ExpandNames(self):
11387
    # This raises errors.OpPrereqError on its own:
11388
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
11389

    
11390
    self.needed_locks = {
11391
      locking.LEVEL_NODEGROUP: [self.group_uuid],
11392
      }
11393

    
11394
  def CheckPrereq(self):
11395
    """Check prerequisites.
11396

11397
    Ensures requested new name is not yet used.
11398

11399
    """
11400
    try:
11401
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
11402
    except errors.OpPrereqError:
11403
      pass
11404
    else:
11405
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
11406
                                 " node group (UUID: %s)" %
11407
                                 (self.op.new_name, new_name_uuid),
11408
                                 errors.ECODE_EXISTS)
11409

    
11410
  def BuildHooksEnv(self):
11411
    """Build hooks env.
11412

11413
    """
11414
    return {
11415
      "OLD_NAME": self.op.group_name,
11416
      "NEW_NAME": self.op.new_name,
11417
      }
11418

    
11419
  def BuildHooksNodes(self):
11420
    """Build hooks nodes.
11421

11422
    """
11423
    mn = self.cfg.GetMasterNode()
11424

    
11425
    all_nodes = self.cfg.GetAllNodesInfo()
11426
    all_nodes.pop(mn, None)
11427

    
11428
    run_nodes = [mn]
11429
    run_nodes.extend(node.name for node in all_nodes.values()
11430
                     if node.group == self.group_uuid)
11431

    
11432
    return (run_nodes, run_nodes)
11433

    
11434
  def Exec(self, feedback_fn):
11435
    """Rename the node group.
11436

11437
    """
11438
    group = self.cfg.GetNodeGroup(self.group_uuid)
11439

    
11440
    if group is None:
11441
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
11442
                               (self.op.group_name, self.group_uuid))
11443

    
11444
    group.name = self.op.new_name
11445
    self.cfg.Update(group, feedback_fn)
11446

    
11447
    return self.op.new_name
11448

    
11449

    
11450
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
11451
  """Generic tags LU.
11452

11453
  This is an abstract class which is the parent of all the other tags LUs.
11454

11455
  """
11456
  def ExpandNames(self):
11457
    self.group_uuid = None
11458
    self.needed_locks = {}
11459
    if self.op.kind == constants.TAG_NODE:
11460
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
11461
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
11462
    elif self.op.kind == constants.TAG_INSTANCE:
11463
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
11464
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
11465
    elif self.op.kind == constants.TAG_NODEGROUP:
11466
      self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
11467

    
11468
    # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
11469
    # not possible to acquire the BGL based on opcode parameters)
11470

    
11471
  def CheckPrereq(self):
11472
    """Check prerequisites.
11473

11474
    """
11475
    if self.op.kind == constants.TAG_CLUSTER:
11476
      self.target = self.cfg.GetClusterInfo()
11477
    elif self.op.kind == constants.TAG_NODE:
11478
      self.target = self.cfg.GetNodeInfo(self.op.name)
11479
    elif self.op.kind == constants.TAG_INSTANCE:
11480
      self.target = self.cfg.GetInstanceInfo(self.op.name)
11481
    elif self.op.kind == constants.TAG_NODEGROUP:
11482
      self.target = self.cfg.GetNodeGroup(self.group_uuid)
11483
    else:
11484
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
11485
                                 str(self.op.kind), errors.ECODE_INVAL)


class LUTagsGet(TagsLU):
  """Returns the tags of a given object.

  """
  REQ_BGL = False

  def ExpandNames(self):
    TagsLU.ExpandNames(self)

    # Share locks as this is only a read operation
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUTagsSearch(NoHooksLU):
11508
  """Searches the tags for a given pattern.
11509

11510
  """
11511
  REQ_BGL = False
11512

    
11513
  def ExpandNames(self):
11514
    self.needed_locks = {}
11515

    
11516
  def CheckPrereq(self):
11517
    """Check prerequisites.
11518

11519
    This checks the pattern passed for validity by compiling it.
11520

11521
    """
11522
    try:
11523
      self.re = re.compile(self.op.pattern)
11524
    except re.error, err:
11525
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
11526
                                 (self.op.pattern, err), errors.ECODE_INVAL)
11527

    
11528
  def Exec(self, feedback_fn):
11529
    """Returns the tag list.
11530

11531
    """
11532
    cfg = self.cfg
11533
    tgts = [("/cluster", cfg.GetClusterInfo())]
11534
    ilist = cfg.GetAllInstancesInfo().values()
11535
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
11536
    nlist = cfg.GetAllNodesInfo().values()
11537
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
11538
    tgts.extend(("/nodegroup/%s" % n.name, n)
11539
                for n in cfg.GetAllNodeGroupsInfo().values())
11540
    results = []
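    # Each collected entry is a (path, tag) pair, e.g. (illustrative values)
    # ("/instances/inst1.example.com", "mytag"); the paths use the /cluster,
    # /instances/<name>, /nodes/<name> and /nodegroup/<name> prefixes built
    # into tgts above.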
11541
    for path, target in tgts:
11542
      for tag in target.GetTags():
11543
        if self.re.search(tag):
11544
          results.append((path, tag))
11545
    return results
11546

    
11547

    
11548
class LUTagsSet(TagsLU):
11549
  """Sets a tag on a given object.
11550

11551
  """
11552
  REQ_BGL = False
11553

    
11554
  def CheckPrereq(self):
11555
    """Check prerequisites.
11556

11557
    This checks the type and length of the tag name and value.
11558

11559
    """
11560
    TagsLU.CheckPrereq(self)
11561
    for tag in self.op.tags:
11562
      objects.TaggableObject.ValidateTag(tag)
11563

    
11564
  def Exec(self, feedback_fn):
11565
    """Sets the tag.
11566

11567
    """
11568
    try:
11569
      for tag in self.op.tags:
11570
        self.target.AddTag(tag)
11571
    except errors.TagError, err:
11572
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
11573
    self.cfg.Update(self.target, feedback_fn)
11574

    
11575

    
11576
class LUTagsDel(TagsLU):
11577
  """Delete a list of tags from a given object.
11578

11579
  """
11580
  REQ_BGL = False
11581

    
11582
  def CheckPrereq(self):
11583
    """Check prerequisites.
11584

11585
    This checks that we have the given tag.
11586

11587
    """
11588
    TagsLU.CheckPrereq(self)
11589
    for tag in self.op.tags:
11590
      objects.TaggableObject.ValidateTag(tag)
11591
    del_tags = frozenset(self.op.tags)
11592
    cur_tags = self.target.GetTags()
11593

    
11594
    diff_tags = del_tags - cur_tags
11595
    if diff_tags:
11596
      diff_names = ("'%s'" % i for i in sorted(diff_tags))
11597
      raise errors.OpPrereqError("Tag(s) %s not found" %
11598
                                 (utils.CommaJoin(diff_names), ),
11599
                                 errors.ECODE_NOENT)
11600

    
11601
  def Exec(self, feedback_fn):
11602
    """Remove the tag from the object.
11603

11604
    """
11605
    for tag in self.op.tags:
11606
      self.target.RemoveTag(tag)
11607
    self.cfg.Update(self.target, feedback_fn)
11608

    
11609

    
11610
class LUTestDelay(NoHooksLU):
11611
  """Sleep for a specified amount of time.
11612

11613
  This LU sleeps on the master and/or nodes for a specified amount of
11614
  time.
11615

11616
  """
11617
  REQ_BGL = False
11618

    
11619
  def ExpandNames(self):
11620
    """Expand names and set required locks.
11621

11622
    This expands the node list, if any.
11623

11624
    """
11625
    self.needed_locks = {}
11626
    if self.op.on_nodes:
11627
      # _GetWantedNodes can be used here, but is not always appropriate to use
11628
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
11629
      # more information.
11630
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
11631
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
11632

    
11633
  def _TestDelay(self):
11634
    """Do the actual sleep.
11635

11636
    """
11637
    if self.op.on_master:
11638
      if not utils.TestDelay(self.op.duration):
11639
        raise errors.OpExecError("Error during master delay test")
11640
    if self.op.on_nodes:
11641
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
11642
      for node, node_result in result.items():
11643
        node_result.Raise("Failure during rpc call to node %s" % node)
11644

    
11645
  def Exec(self, feedback_fn):
11646
    """Execute the test delay opcode, with the wanted repetitions.
11647

11648
    """
11649
    if self.op.repeat == 0:
11650
      self._TestDelay()
11651
    else:
11652
      top_value = self.op.repeat - 1
11653
      for i in range(self.op.repeat):
11654
        self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
11655
        self._TestDelay()
11656

    
11657

    
11658
class LUTestJqueue(NoHooksLU):
11659
  """Utility LU to test some aspects of the job queue.
11660

11661
  """
11662
  REQ_BGL = False
11663

    
11664
  # Must be lower than default timeout for WaitForJobChange to see whether it
11665
  # notices changed jobs
11666
  _CLIENT_CONNECT_TIMEOUT = 20.0
11667
  _CLIENT_CONFIRM_TIMEOUT = 60.0
11668

    
11669
  @classmethod
11670
  def _NotifyUsingSocket(cls, cb, errcls):
11671
    """Opens a Unix socket and waits for another program to connect.
11672

11673
    @type cb: callable
11674
    @param cb: Callback to send socket name to client
11675
    @type errcls: class
11676
    @param errcls: Exception class to use for errors
11677

11678
    """
11679
    # Using a temporary directory as there's no easy way to create temporary
11680
    # sockets without writing a custom loop around tempfile.mktemp and
11681
    # socket.bind
11682
    tmpdir = tempfile.mkdtemp()
11683
    try:
11684
      tmpsock = utils.PathJoin(tmpdir, "sock")
11685

    
11686
      logging.debug("Creating temporary socket at %s", tmpsock)
11687
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
11688
      try:
11689
        sock.bind(tmpsock)
11690
        sock.listen(1)
11691

    
11692
        # Send details to client
11693
        cb(tmpsock)
11694

    
11695
        # Wait for client to connect before continuing
11696
        sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
11697
        try:
11698
          (conn, _) = sock.accept()
11699
        except socket.error, err:
11700
          raise errcls("Client didn't connect in time (%s)" % err)
11701
      finally:
11702
        sock.close()
11703
    finally:
11704
      # Remove as soon as client is connected
11705
      shutil.rmtree(tmpdir)
11706

    
11707
    # Wait for client to close
11708
    try:
11709
      try:
11710
        # pylint: disable-msg=E1101
11711
        # Instance of '_socketobject' has no ... member
11712
        conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
11713
        conn.recv(1)
11714
      except socket.error, err:
11715
        raise errcls("Client failed to confirm notification (%s)" % err)
11716
    finally:
11717
      conn.close()
11718

    
11719
  def _SendNotification(self, test, arg, sockname):
11720
    """Sends a notification to the client.
11721

11722
    @type test: string
11723
    @param test: Test name
11724
    @param arg: Test argument (depends on test)
11725
    @type sockname: string
11726
    @param sockname: Socket path
11727

11728
    """
11729
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
11730

    
11731
  def _Notify(self, prereq, test, arg):
11732
    """Notifies the client of a test.
11733

11734
    @type prereq: bool
11735
    @param prereq: Whether this is a prereq-phase test
11736
    @type test: string
11737
    @param test: Test name
11738
    @param arg: Test argument (depends on test)
11739

11740
    """
11741
    if prereq:
11742
      errcls = errors.OpPrereqError
11743
    else:
11744
      errcls = errors.OpExecError
11745

    
11746
    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
11747
                                                  test, arg),
11748
                                   errcls)
11749

    
11750
  def CheckArguments(self):
11751
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
11752
    self.expandnames_calls = 0
11753

    
11754
  def ExpandNames(self):
11755
    checkargs_calls = getattr(self, "checkargs_calls", 0)
11756
    if checkargs_calls < 1:
11757
      raise errors.ProgrammerError("CheckArguments was not called")
11758

    
11759
    self.expandnames_calls += 1
11760

    
11761
    if self.op.notify_waitlock:
11762
      self._Notify(True, constants.JQT_EXPANDNAMES, None)
11763

    
11764
    self.LogInfo("Expanding names")
11765

    
11766
    # Get lock on master node (just to get a lock, not for a particular reason)
11767
    self.needed_locks = {
11768
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
11769
      }
11770

    
11771
  def Exec(self, feedback_fn):
11772
    if self.expandnames_calls < 1:
11773
      raise errors.ProgrammerError("ExpandNames was not called")
11774

    
11775
    if self.op.notify_exec:
11776
      self._Notify(False, constants.JQT_EXEC, None)
11777

    
11778
    self.LogInfo("Executing")
11779

    
11780
    if self.op.log_messages:
11781
      self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
11782
      for idx, msg in enumerate(self.op.log_messages):
11783
        self.LogInfo("Sending log message %s", idx + 1)
11784
        feedback_fn(constants.JQT_MSGPREFIX + msg)
11785
        # Report how many test messages have been sent
11786
        self._Notify(False, constants.JQT_LOGMSG, idx + 1)
11787

    
11788
    if self.op.fail:
11789
      raise errors.OpExecError("Opcode failure was requested")
11790

    
11791
    return True
11792

    
11793

    
11794
class IAllocator(object):
11795
  """IAllocator framework.
11796

11797
  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage
11805

11806
  """
11807
  # pylint: disable-msg=R0902
11808
  # lots of instance attributes
11809

    
11810
  def __init__(self, cfg, rpc, mode, **kwargs):
11811
    self.cfg = cfg
11812
    self.rpc = rpc
11813
    # init buffer variables
11814
    self.in_text = self.out_text = self.in_data = self.out_data = None
11815
    # init all input fields so that pylint is happy
11816
    self.mode = mode
11817
    self.mem_size = self.disks = self.disk_template = None
11818
    self.os = self.tags = self.nics = self.vcpus = None
11819
    self.hypervisor = None
11820
    self.relocate_from = None
11821
    self.name = None
11822
    self.evac_nodes = None
11823
    self.instances = None
11824
    self.reloc_mode = None
11825
    self.target_groups = None
11826
    # computed fields
11827
    self.required_nodes = None
11828
    # init result fields
11829
    self.success = self.info = self.result = None
11830

    
11831
    try:
11832
      (fn, keyset, self._result_check) = self._MODE_DATA[self.mode]
11833
    except KeyError:
11834
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
11835
                                   " IAllocator" % self.mode)
11836

    
11837
    for key in kwargs:
11838
      if key not in keyset:
11839
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
11840
                                     " IAllocator" % key)
11841
      setattr(self, key, kwargs[key])
11842

    
11843
    for key in keyset:
11844
      if key not in kwargs:
11845
        raise errors.ProgrammerError("Missing input parameter '%s' to"
11846
                                     " IAllocator" % key)
11847
    self._BuildInputData(compat.partial(fn, self))
11848

    
11849
  def _ComputeClusterData(self):
11850
    """Compute the generic allocator input data.
11851

11852
    This is the data that is independent of the actual operation.
11853

11854
    """
11855
    cfg = self.cfg
11856
    cluster_info = cfg.GetClusterInfo()
11857
    # cluster data
11858
    data = {
11859
      "version": constants.IALLOCATOR_VERSION,
11860
      "cluster_name": cfg.GetClusterName(),
11861
      "cluster_tags": list(cluster_info.GetTags()),
11862
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
11863
      # we don't have job IDs
11864
      }
11865
    ninfo = cfg.GetAllNodesInfo()
11866
    iinfo = cfg.GetAllInstancesInfo().values()
11867
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
11868

    
11869
    # node data
11870
    node_list = [n.name for n in ninfo.values() if n.vm_capable]
11871

    
11872
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
11873
      hypervisor_name = self.hypervisor
11874
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
11875
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
11876
    elif self.mode in (constants.IALLOCATOR_MODE_MEVAC,
11877
                       constants.IALLOCATOR_MODE_MRELOC):
11878
      hypervisor_name = cluster_info.enabled_hypervisors[0]
11879

    
11880
    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
11881
                                        hypervisor_name)
11882
    node_iinfo = \
11883
      self.rpc.call_all_instances_info(node_list,
11884
                                       cluster_info.enabled_hypervisors)
11885

    
11886
    data["nodegroups"] = self._ComputeNodeGroupData(cfg)
11887

    
11888
    config_ndata = self._ComputeBasicNodeData(ninfo)
11889
    data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
11890
                                                 i_list, config_ndata)
11891
    assert len(data["nodes"]) == len(ninfo), \
11892
        "Incomplete node data computed"
11893

    
11894
    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
11895

    
11896
    self.in_data = data
11897

    
11898
  @staticmethod
11899
  def _ComputeNodeGroupData(cfg):
11900
    """Compute node groups data.
11901

11902
    """
11903
    ng = dict((guuid, {
11904
      "name": gdata.name,
11905
      "alloc_policy": gdata.alloc_policy,
11906
      })
11907
      for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())
11908

    
11909
    return ng
11910

    
11911
  @staticmethod
11912
  def _ComputeBasicNodeData(node_cfg):
11913
    """Compute global node data.
11914

11915
    @rtype: dict
11916
    @returns: a dict of name: (node dict, node config)
11917

11918
    """
11919
    # fill in static (config-based) values
11920
    node_results = dict((ninfo.name, {
11921
      "tags": list(ninfo.GetTags()),
11922
      "primary_ip": ninfo.primary_ip,
11923
      "secondary_ip": ninfo.secondary_ip,
11924
      "offline": ninfo.offline,
11925
      "drained": ninfo.drained,
11926
      "master_candidate": ninfo.master_candidate,
11927
      "group": ninfo.group,
11928
      "master_capable": ninfo.master_capable,
11929
      "vm_capable": ninfo.vm_capable,
11930
      })
11931
      for ninfo in node_cfg.values())
11932

    
11933
    return node_results
11934

    
11935
  @staticmethod
11936
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
11937
                              node_results):
11938
    """Compute global node data.
11939

11940
    @param node_results: the basic node structures as filled from the config
11941

11942
    """
11943
    # make a copy of the current dict
11944
    node_results = dict(node_results)
11945
    for nname, nresult in node_data.items():
11946
      assert nname in node_results, "Missing basic data for node %s" % nname
11947
      ninfo = node_cfg[nname]
11948

    
11949
      if not (ninfo.offline or ninfo.drained):
11950
        nresult.Raise("Can't get data for node %s" % nname)
11951
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
11952
                                nname)
11953
        remote_info = nresult.payload
11954

    
11955
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
11956
                     'vg_size', 'vg_free', 'cpu_total']:
11957
          if attr not in remote_info:
11958
            raise errors.OpExecError("Node '%s' didn't return attribute"
11959
                                     " '%s'" % (nname, attr))
11960
          if not isinstance(remote_info[attr], int):
11961
            raise errors.OpExecError("Node '%s' returned invalid value"
11962
                                     " for '%s': %s" %
11963
                                     (nname, attr, remote_info[attr]))
11964
        # compute memory used by primary instances
11965
        i_p_mem = i_p_up_mem = 0
11966
        for iinfo, beinfo in i_list:
11967
          if iinfo.primary_node == nname:
11968
            i_p_mem += beinfo[constants.BE_MEMORY]
11969
            if iinfo.name not in node_iinfo[nname].payload:
11970
              i_used_mem = 0
11971
            else:
11972
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
11973
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
11974
            remote_info['memory_free'] -= max(0, i_mem_diff)
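            # i.e. reserve the instance's full configured BE_MEMORY rather
            # than only what it currently uses, so the reported free memory
            # does not over-commit space a running instance may still grow
            # into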
11975

    
11976
            if iinfo.admin_up:
11977
              i_p_up_mem += beinfo[constants.BE_MEMORY]
11978

    
11979
        # compute memory used by instances
11980
        pnr_dyn = {
11981
          "total_memory": remote_info['memory_total'],
11982
          "reserved_memory": remote_info['memory_dom0'],
11983
          "free_memory": remote_info['memory_free'],
11984
          "total_disk": remote_info['vg_size'],
11985
          "free_disk": remote_info['vg_free'],
11986
          "total_cpus": remote_info['cpu_total'],
11987
          "i_pri_memory": i_p_mem,
11988
          "i_pri_up_memory": i_p_up_mem,
11989
          }
11990
        pnr_dyn.update(node_results[nname])
11991
        node_results[nname] = pnr_dyn
11992

    
11993
    return node_results
11994

    
11995
  @staticmethod
11996
  def _ComputeInstanceData(cluster_info, i_list):
11997
    """Compute global instance data.
11998

11999
    """
12000
    instance_data = {}
12001
    for iinfo, beinfo in i_list:
12002
      nic_data = []
12003
      for nic in iinfo.nics:
12004
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
12005
        nic_dict = {
12006
          "mac": nic.mac,
12007
          "ip": nic.ip,
12008
          "mode": filled_params[constants.NIC_MODE],
12009
          "link": filled_params[constants.NIC_LINK],
12010
          }
12011
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
12012
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
12013
        nic_data.append(nic_dict)
12014
      pir = {
12015
        "tags": list(iinfo.GetTags()),
12016
        "admin_up": iinfo.admin_up,
12017
        "vcpus": beinfo[constants.BE_VCPUS],
12018
        "memory": beinfo[constants.BE_MEMORY],
12019
        "os": iinfo.os,
12020
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
12021
        "nics": nic_data,
12022
        "disks": [{constants.IDISK_SIZE: dsk.size,
12023
                   constants.IDISK_MODE: dsk.mode}
12024
                  for dsk in iinfo.disks],
12025
        "disk_template": iinfo.disk_template,
12026
        "hypervisor": iinfo.hypervisor,
12027
        }
12028
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
12029
                                                 pir["disks"])
12030
      instance_data[iinfo.name] = pir
12031

    
12032
    return instance_data
12033

    
12034
  def _AddNewInstance(self):
12035
    """Add new instance data to allocator structure.
12036

12037
    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.
12039

12040
    The checks for the completeness of the opcode must have already been
12041
    done.
12042

12043
    """
12044
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
12045

    
12046
    if self.disk_template in constants.DTS_INT_MIRROR:
12047
      self.required_nodes = 2
12048
    else:
12049
      self.required_nodes = 1
12050

    
12051
    request = {
12052
      "name": self.name,
12053
      "disk_template": self.disk_template,
12054
      "tags": self.tags,
12055
      "os": self.os,
12056
      "vcpus": self.vcpus,
12057
      "memory": self.mem_size,
12058
      "disks": self.disks,
12059
      "disk_space_total": disk_space,
12060
      "nics": self.nics,
12061
      "required_nodes": self.required_nodes,
12062
      }
12063

    
12064
    return request
12065

    
12066
  def _AddRelocateInstance(self):
12067
    """Add relocate instance data to allocator structure.
12068

12069
    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.
12071

12072
    The checks for the completeness of the opcode must have already been
12073
    done.
12074

12075
    """
12076
    instance = self.cfg.GetInstanceInfo(self.name)
12077
    if instance is None:
12078
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
12079
                                   " IAllocator" % self.name)
12080

    
12081
    if instance.disk_template not in constants.DTS_MIRRORED:
12082
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
12083
                                 errors.ECODE_INVAL)
12084

    
12085
    if instance.disk_template in constants.DTS_INT_MIRROR and \
12086
        len(instance.secondary_nodes) != 1:
12087
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
12088
                                 errors.ECODE_STATE)
12089

    
12090
    self.required_nodes = 1
12091
    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
12092
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
12093

    
12094
    request = {
12095
      "name": self.name,
12096
      "disk_space_total": disk_space,
12097
      "required_nodes": self.required_nodes,
12098
      "relocate_from": self.relocate_from,
12099
      }
12100
    return request
12101

    
12102
  def _AddEvacuateNodes(self):
12103
    """Add evacuate nodes data to allocator structure.
12104

12105
    """
12106
    request = {
12107
      "evac_nodes": self.evac_nodes
12108
      }
12109
    return request
12110

    
12111
  def _AddMultiRelocate(self):
12112
    """Get data for multi-relocate requests.
12113

12114
    """
12115
    return {
12116
      "instances": self.instances,
12117
      "reloc_mode": self.reloc_mode,
12118
      "target_groups": self.target_groups,
12119
      }
12120

    
12121
  def _BuildInputData(self, fn):
12122
    """Build input data structures.
12123

12124
    """
12125
    self._ComputeClusterData()
12126

    
12127
    request = fn()
12128
    request["type"] = self.mode
12129
    self.in_data["request"] = request
12130

    
12131
    self.in_text = serializer.Dump(self.in_data)
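    # Rough shape of the serialized input, as built by _ComputeClusterData
    # and the per-mode request builders (keys shown are a non-exhaustive
    # sketch):
    #   {"version": ..., "cluster_name": ..., "cluster_tags": [...],
    #    "enabled_hypervisors": [...], "nodegroups": {...}, "nodes": {...},
    #    "instances": {...}, "request": {"type": <mode>, ...}}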
12132

    
12133
  _MODE_DATA = {
12134
    constants.IALLOCATOR_MODE_ALLOC:
12135
      (_AddNewInstance,
12136
       ["name", "mem_size", "disks", "disk_template", "os", "tags", "nics",
12137
        "vcpus", "hypervisor"], ht.TList),
12138
    constants.IALLOCATOR_MODE_RELOC:
12139
      (_AddRelocateInstance, ["name", "relocate_from"], ht.TList),
12140
    constants.IALLOCATOR_MODE_MEVAC:
12141
      (_AddEvacuateNodes, ["evac_nodes"],
12142
       ht.TListOf(ht.TAnd(ht.TIsLength(2),
12143
                          ht.TListOf(ht.TString)))),
12144
    constants.IALLOCATOR_MODE_MRELOC:
12145
      (_AddMultiRelocate, ["instances", "reloc_mode", "target_groups"],
12146
       ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
12147
         # pylint: disable-msg=E1101
12148
         # Class '...' has no 'OP_ID' member
12149
         "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
12150
                              opcodes.OpInstanceMigrate.OP_ID,
12151
                              opcodes.OpInstanceReplaceDisks.OP_ID])
12152
         })))),
12153
    }
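  # Each _MODE_DATA entry maps an allocator mode to a tuple of
  # (request-builder method, required constructor keyword arguments,
  # ht-based check applied to the script's result); see __init__ and
  # _ValidateResult for how these three elements are used.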
12154

    
12155
  def Run(self, name, validate=True, call_fn=None):
12156
    """Run an instance allocator and return the results.
12157

12158
    """
12159
    if call_fn is None:
12160
      call_fn = self.rpc.call_iallocator_runner
12161

    
12162
    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
12163
    result.Raise("Failure while running the iallocator script")
12164

    
12165
    self.out_text = result.payload
12166
    if validate:
12167
      self._ValidateResult()
12168

    
12169
  def _ValidateResult(self):
12170
    """Process the allocator results.
12171

12172
    This will process and if successful save the result in
12173
    self.out_data and the other parameters.
12174

12175
    """
12176
    try:
12177
      rdict = serializer.Load(self.out_text)
12178
    except Exception, err:
12179
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
12180

    
12181
    if not isinstance(rdict, dict):
12182
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
12183

    
12184
    # TODO: remove backwards compatibility in later versions
12185
    if "nodes" in rdict and "result" not in rdict:
12186
      rdict["result"] = rdict["nodes"]
12187
      del rdict["nodes"]
12188

    
12189
    for key in "success", "info", "result":
12190
      if key not in rdict:
12191
        raise errors.OpExecError("Can't parse iallocator results:"
12192
                                 " missing key '%s'" % key)
12193
      setattr(self, key, rdict[key])
12194

    
12195
    if not self._result_check(self.result):
12196
      raise errors.OpExecError("Iallocator returned invalid result,"
12197
                               " expected %s, got %s" %
12198
                               (self._result_check, self.result),
12199
                               errors.ECODE_INVAL)
12200

    
12201
    if self.mode in (constants.IALLOCATOR_MODE_RELOC,
12202
                     constants.IALLOCATOR_MODE_MEVAC):
12203
      node2group = dict((name, ndata["group"])
12204
                        for (name, ndata) in self.in_data["nodes"].items())
12205

    
12206
      fn = compat.partial(self._NodesToGroups, node2group,
12207
                          self.in_data["nodegroups"])
12208

    
12209
      if self.mode == constants.IALLOCATOR_MODE_RELOC:
12210
        assert self.relocate_from is not None
12211
        assert self.required_nodes == 1
12212

    
12213
        request_groups = fn(self.relocate_from)
12214
        result_groups = fn(rdict["result"])
12215

    
12216
        if result_groups != request_groups:
12217
          raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
12218
                                   " differ from original groups (%s)" %
12219
                                   (utils.CommaJoin(result_groups),
12220
                                    utils.CommaJoin(request_groups)))
12221
      elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
12222
        request_groups = fn(self.evac_nodes)
12223
        for (instance_name, secnode) in self.result:
12224
          result_groups = fn([secnode])
12225
          if result_groups != request_groups:
12226
            raise errors.OpExecError("Iallocator returned new secondary node"
12227
                                     " '%s' (group '%s') for instance '%s'"
12228
                                     " which is not in original group '%s'" %
12229
                                     (secnode, utils.CommaJoin(result_groups),
12230
                                      instance_name,
12231
                                      utils.CommaJoin(request_groups)))
12232
      else:
12233
        raise errors.ProgrammerError("Unhandled mode '%s'" % self.mode)
12234

    
12235
    self.out_data = rdict
12236

    
12237
  @staticmethod
12238
  def _NodesToGroups(node2group, groups, nodes):
12239
    """Returns a list of unique group names for a list of nodes.
12240

12241
    @type node2group: dict
12242
    @param node2group: Map from node name to group UUID
12243
    @type groups: dict
12244
    @param groups: Group information
12245
    @type nodes: list
12246
    @param nodes: Node names
12247

12248
    """
12249
    result = set()
12250

    
12251
    for node in nodes:
12252
      try:
12253
        group_uuid = node2group[node]
12254
      except KeyError:
12255
        # Ignore unknown node
12256
        pass
12257
      else:
12258
        try:
12259
          group = groups[group_uuid]
12260
        except KeyError:
12261
          # Can't find group, let's use UUID
12262
          group_name = group_uuid
12263
        else:
12264
          group_name = group["name"]
12265

    
12266
        result.add(group_name)
12267

    
12268
    return sorted(result)


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    elif self.op.mode == constants.IALLOCATOR_MODE_MRELOC:
      if self.op.instances:
        self.op.instances = _GetWantedInstances(self, self.op.instances)
      else:
        raise errors.OpPrereqError("Missing instances to relocate",
                                   errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    elif self.op.mode == constants.IALLOCATOR_MODE_MRELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       instances=self.op.instances,
                       reloc_mode=self.op.reloc_mode,
                       target_groups=self.op.target_groups)
    else:
      raise errors.ProgrammerError("Unhandled mode '%s' in"
                                   " LUTestAllocator.Exec" % self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result


#: Query type implementations
_QUERY_IMPL = {
  constants.QR_INSTANCE: _InstanceQuery,
  constants.QR_NODE: _NodeQuery,
  constants.QR_GROUP: _GroupQuery,
  constants.QR_OS: _OsQuery,
  }

assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
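# The assert above ensures that every query resource reachable via an
# opcode (constants.QR_VIA_OP) has an entry in _QUERY_IMPL, so the lookup
# in _GetQueryImplementation below cannot miss a supported resource.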


def _GetQueryImplementation(name):
  """Returns the implementation for a query type.

  @param name: Query type, must be one of L{constants.QR_VIA_OP}

  """
  try:
    return _QUERY_IMPL[name]
  except KeyError:
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
                               errors.ECODE_INVAL)
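
# For example, _GetQueryImplementation(constants.QR_NODE) returns the
# _NodeQuery class; an unknown resource name raises OpPrereqError with
# ECODE_INVAL.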