#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable=W0201,C0302

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

# C0302: since we have waaaay too many lines in this module

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL
import socket
import tempfile
import shutil
import itertools
import operator

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
from ganeti import masterd
from ganeti import netutils
from ganeti import query
from ganeti import qlang
from ganeti import opcodes
from ganeti import ht

import ganeti.masterd.instance # pylint: disable=W0611


class ResultWithJobs:
  """Data container for LU results with jobs.

  Instances of this class returned from L{LogicalUnit.Exec} will be recognized
  by L{mcpu.Processor._ProcessResult}. The latter will then submit the jobs
  contained in the C{jobs} attribute and include the job IDs in the opcode
  result.

  """
  def __init__(self, jobs, **kwargs):
    """Initializes this class.

    Additional return values can be specified as keyword arguments.

    @type jobs: list of lists of L{opcode.OpCode}
    @param jobs: A list of lists of opcode objects

    """
    self.jobs = jobs
    self.other = kwargs
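
# Illustrative usage sketch (editor's note, not part of the upstream module):
# an LU whose Exec() wants follow-up jobs submitted on its behalf can return
# something like
#
#   return ResultWithJobs([[opcodes.OpClusterVerifyConfig()]],
#                         other_key="other value")
#
# mcpu.Processor._ProcessResult then submits the job(s) and merges the job IDs
# into the opcode result; "other_key" is a purely hypothetical extra value.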


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - implement BuildHooksNodes
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.glm = context.glm
    # readability alias
    self.owned_locks = context.glm.list_owned
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # logging
    self.Log = processor.Log # pylint: disable=C0103
    self.LogWarning = processor.LogWarning # pylint: disable=C0103
    self.LogInfo = processor.LogInfo # pylint: disable=C0103
    self.LogStep = processor.LogStep # pylint: disable=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    # Validate opcode parameters and set defaults
    self.op.Validate(True)

    self.CheckArguments()

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing them separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer need to worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.com'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.com', 'node2.example.com'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      pass

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    @rtype: dict
    @return: Dictionary containing the environment that will be used for
      running the hooks for this LU. The keys of the dict must not be prefixed
      with "GANETI_"--that'll be added by the hooks runner. The hooks runner
      will extend the environment with additional variables. If no environment
      should be defined, an empty dictionary should be returned (not C{None}).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def BuildHooksNodes(self):
    """Build list of nodes to run LU's hooks.

    @rtype: tuple; (list, list)
    @return: Tuple containing a list of node names on which the hook
      should run before the execution and a list of node names on which the
      hook should run after the execution. No nodes should be returned as an
      empty list (and not None).
    @note: If the C{HPATH} attribute of the LU class is C{None}, this function
      will not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused argument and the
    # "could be a function" warnings
    # pylint: disable=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we really have been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
    for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit): # pylint: disable=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    raise AssertionError("BuildHooksEnv called for NoHooksLUs")

  def BuildHooksNodes(self):
    """Empty BuildHooksNodes for NoHooksLU.

    """
    raise AssertionError("BuildHooksNodes called for NoHooksLU")


class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    pass

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


class _QueryBase:
  """Base for query utility classes.

  """
  #: Attribute holding field definitions
  FIELDS = None

  def __init__(self, filter_, fields, use_locking):
    """Initializes this class.

    """
    self.use_locking = use_locking

    self.query = query.Query(self.FIELDS, fields, filter_=filter_,
                             namefield="name")
    self.requested_data = self.query.RequestedData()
    self.names = self.query.RequestedNames()

    # Sort only if no names were requested
    self.sort_by_name = not self.names

    self.do_locking = None
    self.wanted = None

  def _GetNames(self, lu, all_names, lock_level):
    """Helper function to determine names asked for in the query.

    """
    if self.do_locking:
      names = lu.owned_locks(lock_level)
    else:
      names = all_names

    if self.wanted == locking.ALL_SET:
      assert not self.names
      # caller didn't specify names, so ordering is not important
      return utils.NiceSort(names)

    # caller specified names and we must keep the same order
    assert self.names
    assert not self.do_locking or lu.glm.is_owned(lock_level)

    missing = set(self.wanted).difference(names)
    if missing:
      raise errors.OpExecError("Some items were removed before retrieving"
                               " their data: %s" % missing)

    # Return expanded names
    return self.wanted

  def ExpandNames(self, lu):
    """Expand names for this query.

    See L{LogicalUnit.ExpandNames}.

    """
    raise NotImplementedError()

  def DeclareLocks(self, lu, level):
    """Declare locks for this query.

    See L{LogicalUnit.DeclareLocks}.

    """
    raise NotImplementedError()

  def _GetQueryData(self, lu):
    """Collects all data for this query.

    @return: Query data object

    """
    raise NotImplementedError()

  def NewStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return query.GetQueryResponse(self.query, self._GetQueryData(lu),
                                  sort_by_name=self.sort_by_name)

  def OldStyleQuery(self, lu):
    """Collect data and execute query.

    """
    return self.query.OldStyleQuery(self._GetQueryData(lu),
                                    sort_by_name=self.sort_by_name)


def _ShareAll():
  """Returns a dict declaring all lock levels shared.

  """
  return dict.fromkeys(locking.LEVELS, 1)
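
# Illustrative usage sketch (editor's note, not part of the upstream module):
# an LU that only reads state typically combines _ShareAll() with its lock
# declarations so every level is acquired in shared mode, e.g.:
#
#   def ExpandNames(self):
#     self.share_locks = _ShareAll()
#     self.needed_locks = {
#       locking.LEVEL_NODE: locking.ALL_SET,
#     }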


def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
  """Checks if the owned node groups are still correct for an instance.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type instance_name: string
  @param instance_name: Instance name
  @type owned_groups: set or frozenset
  @param owned_groups: List of currently owned node groups

  """
  inst_groups = cfg.GetInstanceNodeGroups(instance_name)

  if not owned_groups.issuperset(inst_groups):
    raise errors.OpPrereqError("Instance %s's node groups changed since"
                               " locks were acquired, current groups"
                               " are '%s', owning groups '%s'; retry the"
                               " operation" %
                               (instance_name,
                                utils.CommaJoin(inst_groups),
                                utils.CommaJoin(owned_groups)),
                               errors.ECODE_STATE)

  return inst_groups


def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
  """Checks if the instances in a node group are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type group_uuid: string
  @param group_uuid: Node group UUID
  @type owned_instances: set or frozenset
  @param owned_instances: List of currently owned instances

  """
  wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
  if owned_instances != wanted_instances:
    raise errors.OpPrereqError("Instances in node group '%s' changed since"
                               " locks were acquired, wanted '%s', have '%s';"
                               " retry the operation" %
                               (group_uuid,
                                utils.CommaJoin(wanted_instances),
                                utils.CommaJoin(owned_instances)),
                               errors.ECODE_STATE)

  return wanted_instances


def _SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if nodes:
    return [_ExpandNodeName(lu.cfg, name) for name in nodes]

  return utils.NiceSort(lu.cfg.GetNodeList())


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _GetUpdatedParams(old_params, update_dict,
                      use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy
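
# Illustrative example (editor's note, not part of the upstream module): with
# use_default=True, VALUE_DEFAULT entries drop a key, other entries overwrite
# or add it, and untouched keys are kept, e.g.:
#
#   _GetUpdatedParams({"a": 1, "b": 2}, {"a": constants.VALUE_DEFAULT, "c": 3})
#   -> {"b": 2, "c": 3}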


def _ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  assert not (keep is not None and names is not None), \
         "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  if should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in lu.owned_locks(level):
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.glm.release(level, names=release)

    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    # Release everything
    lu.glm.release(level)

    assert not lu.glm.is_owned(level), "No locks should be owned"


def _MapInstanceDisksToNodes(instances):
  """Creates a map from (node, volume) to instance name.

  @type instances: list of L{objects.Instance}
  @rtype: dict; tuple of (node name, volume name) as key, instance name as
      value

  """
  return dict(((node, vol), inst.name)
              for inst in instances
              for (node, vols) in inst.MapLVsByNode().items()
              for vol in vols)
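
# Illustrative example (editor's note, not part of the upstream module): the
# resulting mapping is keyed by (node name, volume name) pairs; the values
# below are purely hypothetical, e.g.:
#
#   {("node1.example.com", "xenvg/disk0"): "inst1.example.com",
#    ("node2.example.com", "xenvg/disk0"): "inst1.example.com"}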


def _RunPostHook(lu, node_name):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.hmclass(lu.rpc.call_hooks_runner, lu)
  try:
    hm.RunPhase(constants.HOOKS_PHASE_POST, nodes=[node_name])
  except:
    # pylint: disable=W0702
    lu.LogWarning("Errors occurred running hooks on %s" % node_name)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, node), errors.ECODE_STATE)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeVmCapable(lu, node):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
                               errors.ECODE_STATE)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
  """Ensure that a node has the given secondary ip.

  @type lu: L{LogicalUnit}
  @param lu: the LU on behalf of which we make the check
  @type node: string
  @param node: the node to check
  @type secondary_ip: string
  @param secondary_ip: the ip to check
  @type prereq: boolean
  @param prereq: whether to throw a prerequisite or an execute error
  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False

  """
  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
  result.Raise("Failure checking secondary ip on node %s" % node,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
  if not result.payload:
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
           " please fix and re-run this command" % secondary_ip)
    if prereq:
      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
    else:
      raise errors.OpExecError(msg)


def _GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(constants.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
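
# Illustrative example (editor's note, not part of the upstream module): for a
# single-NIC, single-disk instance the resulting dict includes entries such as
# INSTANCE_NAME, INSTANCE_PRIMARY, INSTANCE_NIC_COUNT, INSTANCE_NIC0_MAC,
# INSTANCE_DISK_COUNT and INSTANCE_DISK0_SIZE; per the BuildHooksEnv contract
# above, the hooks runner prefixes each key with "GANETI_" before exporting it
# to the hook scripts.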


def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  cluster = lu.cfg.GetClusterInfo()
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = cluster.SimpleFillNIC(nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    "name": instance.name,
    "primary_node": instance.primary_node,
    "secondary_nodes": instance.secondary_nodes,
    "os_type": instance.os,
    "status": instance.admin_up,
    "memory": bep[constants.BE_MEMORY],
    "vcpus": bep[constants.BE_VCPUS],
    "nics": _NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": [(disk.size, disk.mode) for disk in instance.disks],
    "bep": bep,
    "hvp": hvp,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max with one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


def _CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir(), cfg.GetSharedFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, then the LU's opcode's iallocator slot is filled with the
  cluster-wide default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  iallocator = getattr(lu.op, iallocator_slot, None)

  if node is not None and iallocator is not None:
    raise errors.OpPrereqError("Do not specify both, iallocator and node",
                               errors.ECODE_INVAL)
  elif node is None and iallocator is None:
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found;"
                                 " please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator")


def _GetDefaultIAllocator(cfg, iallocator):
  """Decides on which iallocator to use.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration object
  @type iallocator: string or None
  @param iallocator: Iallocator specified in opcode
  @rtype: string
  @return: Iallocator name

  """
  if not iallocator:
    # Use default iallocator
    iallocator = cfg.GetDefaultIAllocator()

  if not iallocator:
    raise errors.OpPrereqError("No iallocator was specified, neither in the"
                               " opcode nor as a cluster-wide default",
                               errors.ECODE_INVAL)

  return iallocator
1277

    
1278

    
1279
class LUClusterPostInit(LogicalUnit):
1280
  """Logical unit for running hooks after cluster initialization.
1281

1282
  """
1283
  HPATH = "cluster-init"
1284
  HTYPE = constants.HTYPE_CLUSTER
1285

    
1286
  def BuildHooksEnv(self):
1287
    """Build hooks env.
1288

1289
    """
1290
    return {
1291
      "OP_TARGET": self.cfg.GetClusterName(),
1292
      }
1293

    
1294
  def BuildHooksNodes(self):
1295
    """Build hooks nodes.
1296

1297
    """
1298
    return ([], [self.cfg.GetMasterNode()])
1299

    
1300
  def Exec(self, feedback_fn):
1301
    """Nothing to do.
1302

1303
    """
1304
    return True
1305

    
1306

    
1307
class LUClusterDestroy(LogicalUnit):
1308
  """Logical unit for destroying the cluster.
1309

1310
  """
1311
  HPATH = "cluster-destroy"
1312
  HTYPE = constants.HTYPE_CLUSTER
1313

    
1314
  def BuildHooksEnv(self):
1315
    """Build hooks env.
1316

1317
    """
1318
    return {
1319
      "OP_TARGET": self.cfg.GetClusterName(),
1320
      }
1321

    
1322
  def BuildHooksNodes(self):
1323
    """Build hooks nodes.
1324

1325
    """
1326
    return ([], [])
1327

    
1328
  def CheckPrereq(self):
1329
    """Check prerequisites.
1330

1331
    This checks whether the cluster is empty.
1332

1333
    Any errors are signaled by raising errors.OpPrereqError.
1334

1335
    """
1336
    master = self.cfg.GetMasterNode()
1337

    
1338
    nodelist = self.cfg.GetNodeList()
1339
    if len(nodelist) != 1 or nodelist[0] != master:
1340
      raise errors.OpPrereqError("There are still %d node(s) in"
1341
                                 " this cluster." % (len(nodelist) - 1),
1342
                                 errors.ECODE_INVAL)
1343
    instancelist = self.cfg.GetInstanceList()
1344
    if instancelist:
1345
      raise errors.OpPrereqError("There are still %d instance(s) in"
1346
                                 " this cluster." % len(instancelist),
1347
                                 errors.ECODE_INVAL)
1348

    
1349
  def Exec(self, feedback_fn):
1350
    """Destroys the cluster.
1351

1352
    """
1353
    master = self.cfg.GetMasterNode()
1354

    
1355
    # Run post hooks on master node before it's removed
1356
    _RunPostHook(self, master)
1357

    
1358
    result = self.rpc.call_node_stop_master(master, False)
1359
    result.Raise("Could not disable the master role")
1360

    
1361
    return master
1362

    
1363

    
1364
def _VerifyCertificate(filename):
1365
  """Verifies a certificate for L{LUClusterVerifyConfig}.
1366

1367
  @type filename: string
1368
  @param filename: Path to PEM file
1369

1370
  """
1371
  try:
1372
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1373
                                           utils.ReadFile(filename))
1374
  except Exception, err: # pylint: disable=W0703
1375
    return (LUClusterVerifyConfig.ETYPE_ERROR,
1376
            "Failed to load X509 certificate %s: %s" % (filename, err))
1377

    
1378
  (errcode, msg) = \
1379
    utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
1380
                                constants.SSL_CERT_EXPIRATION_ERROR)
1381

    
1382
  if msg:
1383
    fnamemsg = "While verifying %s: %s" % (filename, msg)
1384
  else:
1385
    fnamemsg = None
1386

    
1387
  if errcode is None:
1388
    return (None, fnamemsg)
1389
  elif errcode == utils.CERT_WARNING:
1390
    return (LUClusterVerifyConfig.ETYPE_WARNING, fnamemsg)
1391
  elif errcode == utils.CERT_ERROR:
1392
    return (LUClusterVerifyConfig.ETYPE_ERROR, fnamemsg)
1393

    
1394
  raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
1395

    
1396

    
1397
def _GetAllHypervisorParameters(cluster, instances):
1398
  """Compute the set of all hypervisor parameters.
1399

1400
  @type cluster: L{objects.Cluster}
1401
  @param cluster: the cluster object
1402
  @param instances: list of L{objects.Instance}
1403
  @param instances: additional instances from which to obtain parameters
1404
  @rtype: list of (origin, hypervisor, parameters)
1405
  @return: a list with all parameters found, indicating the hypervisor they
1406
       apply to, and the origin (can be "cluster", "os X", or "instance Y")
1407

1408
  """
1409
  hvp_data = []
1410

    
1411
  for hv_name in cluster.enabled_hypervisors:
1412
    hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
1413

    
1414
  for os_name, os_hvp in cluster.os_hvp.items():
1415
    for hv_name, hv_params in os_hvp.items():
1416
      if hv_params:
1417
        full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
1418
        hvp_data.append(("os %s" % os_name, hv_name, full_params))
1419

    
1420
  # TODO: collapse identical parameter values in a single one
1421
  for instance in instances:
1422
    if instance.hvparams:
1423
      hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
1424
                       cluster.FillHV(instance)))
1425

    
1426
  return hvp_data
1427

    
1428

    
1429
class _VerifyErrors(object):
1430
  """Mix-in for cluster/group verify LUs.
1431

1432
  It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
1433
  self.op and self._feedback_fn to be available.)
1434

1435
  """
1436
  TCLUSTER = "cluster"
1437
  TNODE = "node"
1438
  TINSTANCE = "instance"
1439

    
1440
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1441
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1442
  ECLUSTERFILECHECK = (TCLUSTER, "ECLUSTERFILECHECK")
1443
  ECLUSTERDANGLINGNODES = (TNODE, "ECLUSTERDANGLINGNODES")
1444
  ECLUSTERDANGLINGINST = (TNODE, "ECLUSTERDANGLINGINST")
1445
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1446
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1447
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1448
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1449
  EINSTANCEFAULTYDISK = (TINSTANCE, "EINSTANCEFAULTYDISK")
1450
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1451
  EINSTANCESPLITGROUPS = (TINSTANCE, "EINSTANCESPLITGROUPS")
1452
  ENODEDRBD = (TNODE, "ENODEDRBD")
1453
  ENODEDRBDHELPER = (TNODE, "ENODEDRBDHELPER")
1454
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1455
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
1456
  ENODEHV = (TNODE, "ENODEHV")
1457
  ENODELVM = (TNODE, "ENODELVM")
1458
  ENODEN1 = (TNODE, "ENODEN1")
1459
  ENODENET = (TNODE, "ENODENET")
1460
  ENODEOS = (TNODE, "ENODEOS")
1461
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1462
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1463
  ENODERPC = (TNODE, "ENODERPC")
1464
  ENODESSH = (TNODE, "ENODESSH")
1465
  ENODEVERSION = (TNODE, "ENODEVERSION")
1466
  ENODESETUP = (TNODE, "ENODESETUP")
1467
  ENODETIME = (TNODE, "ENODETIME")
1468
  ENODEOOBPATH = (TNODE, "ENODEOOBPATH")
1469

    
1470
  ETYPE_FIELD = "code"
1471
  ETYPE_ERROR = "ERROR"
1472
  ETYPE_WARNING = "WARNING"
1473

    
1474
  def _Error(self, ecode, item, msg, *args, **kwargs):
1475
    """Format an error message.
1476

1477
    Based on the opcode's error_codes parameter, either format a
1478
    parseable error code, or a simpler error string.
1479

1480
    This must be called only from Exec and functions called from Exec.
1481

1482
    """
1483
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1484
    itype, etxt = ecode
1485
    # first complete the msg
1486
    if args:
1487
      msg = msg % args
1488
    # then format the whole message
1489
    if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
1490
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1491
    else:
1492
      if item:
1493
        item = " " + item
1494
      else:
1495
        item = ""
1496
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1497
    # and finally report it via the feedback_fn
1498
    self._feedback_fn("  - %s" % msg) # Mix-in. pylint: disable=E1101
1499

    
1500
  def _ErrorIf(self, cond, *args, **kwargs):
1501
    """Log an error message if the passed condition is True.
1502

1503
    """
1504
    cond = (bool(cond)
1505
            or self.op.debug_simulate_errors) # pylint: disable=E1101
1506
    if cond:
1507
      self._Error(*args, **kwargs)
1508
    # do not mark the operation as failed for WARN cases only
1509
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1510
      self.bad = self.bad or cond


class LUClusterVerify(NoHooksLU):
  """Submits all jobs necessary to verify the cluster.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    jobs = []

    if self.op.group_name:
      groups = [self.op.group_name]
      depends_fn = lambda: None
    else:
      groups = self.cfg.GetNodeGroupList()

      # Verify global configuration
      jobs.append([opcodes.OpClusterVerifyConfig()])

      # Always depend on global verification
      depends_fn = lambda: [(-len(jobs), [])]

    jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
                                              depends=depends_fn())]
                for group in groups)

    # Fix up all parameters
    for op in itertools.chain(*jobs): # pylint: disable=W0142
      op.debug_simulate_errors = self.op.debug_simulate_errors
      op.verbose = self.op.verbose
      op.error_codes = self.op.error_codes
      try:
        op.skip_checks = self.op.skip_checks
      except AttributeError:
        assert not isinstance(op, opcodes.OpClusterVerifyGroup)

    return ResultWithJobs(jobs)
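
  # For a cluster with node groups "group1" and "group2" (hypothetical names)
  # and no group restriction, the submitted job list would look roughly like:
  #
  #   [[OpClusterVerifyConfig()],
  #    [OpClusterVerifyGroup(group_name="group1", depends=[(-1, [])])],
  #    [OpClusterVerifyGroup(group_name="group2", depends=[(-2, [])])]]
  #
  # i.e. each group verification is a separate job whose relative dependency
  # points back at the global configuration check.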


class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
  """Verifies the cluster config.

  """
  REQ_BGL = True

  def _VerifyHVP(self, hvp_data):
    """Verifies locally the syntax of the hypervisor parameters.

    """
    for item, hv_name, hv_params in hvp_data:
      msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
             (item, hv_name))
      try:
        hv_class = hypervisor.GetHypervisor(hv_name)
        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
        hv_class.CheckParameterSyntax(hv_params)
      except errors.GenericError, err:
        self._ErrorIf(True, self.ECLUSTERCFG, None, msg % str(err))

  def ExpandNames(self):
    # Information can be safely retrieved as the BGL is acquired in exclusive
    # mode
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
    self.all_node_info = self.cfg.GetAllNodesInfo()
    self.all_inst_info = self.cfg.GetAllInstancesInfo()
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    self.bad = False
    self._feedback_fn = feedback_fn

    feedback_fn("* Verifying cluster config")

    for msg in self.cfg.VerifyConfig():
      self._ErrorIf(True, self.ECLUSTERCFG, None, msg)

    feedback_fn("* Verifying cluster certificate files")

    for cert_filename in constants.ALL_CERT_FILES:
      (errcode, msg) = _VerifyCertificate(cert_filename)
      self._ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)

    feedback_fn("* Verifying hypervisor parameters")

    self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
                                                self.all_inst_info.values()))

    feedback_fn("* Verifying all nodes belong to an existing group")

    # We do this verification here because, should this bogus circumstance
    # occur, it would never be caught by VerifyGroup, which only acts on
    # nodes/instances reachable from existing node groups.

    dangling_nodes = set(node.name for node in self.all_node_info.values()
                         if node.group not in self.all_group_info)

    dangling_instances = {}
    no_node_instances = []

    for inst in self.all_inst_info.values():
      if inst.primary_node in dangling_nodes:
        dangling_instances.setdefault(inst.primary_node, []).append(inst.name)
      elif inst.primary_node not in self.all_node_info:
        no_node_instances.append(inst.name)

    # dangling_nodes holds node *names*, so use them directly here
    pretty_dangling = [
        "%s (%s)" %
        (node,
         utils.CommaJoin(dangling_instances.get(node,
                                                ["no instances"])))
        for node in dangling_nodes]

    self._ErrorIf(bool(dangling_nodes), self.ECLUSTERDANGLINGNODES, None,
                  "the following nodes (and their instances) belong to a non"
                  " existing group: %s", utils.CommaJoin(pretty_dangling))

    self._ErrorIf(bool(no_node_instances), self.ECLUSTERDANGLINGINST, None,
                  "the following instances have a non-existing primary-node:"
                  " %s", utils.CommaJoin(no_node_instances))

    return not self.bad


class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
  """Verifies the status of a node group.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_BGL = False

  _HOOKS_INDENT_RE = re.compile("^", re.M)

  class NodeImage(object):
    """A class representing the logical and physical status of a node.

    @type name: string
    @ivar name: the node name to which this object refers
    @ivar volumes: a structure as returned from
        L{ganeti.backend.GetVolumeList} (runtime)
    @ivar instances: a list of running instances (runtime)
    @ivar pinst: list of configured primary instances (config)
    @ivar sinst: list of configured secondary instances (config)
    @ivar sbp: dictionary of {primary-node: list of instances} for all
        instances for which this node is secondary (config)
    @ivar mfree: free memory, as reported by hypervisor (runtime)
    @ivar dfree: free disk, as reported by the node (runtime)
    @ivar offline: the offline status (config)
    @type rpc_fail: boolean
    @ivar rpc_fail: whether the RPC verify call was successful (overall,
        not whether the individual keys were correct) (runtime)
    @type lvm_fail: boolean
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
    @type hyp_fail: boolean
    @ivar hyp_fail: whether the RPC call didn't return the instance list
    @type ghost: boolean
    @ivar ghost: whether this is a known node or not (config)
    @type os_fail: boolean
    @ivar os_fail: whether the RPC call didn't return valid OS data
    @type oslist: list
    @ivar oslist: list of OSes as diagnosed by DiagnoseOS
    @type vm_capable: boolean
    @ivar vm_capable: whether the node can host instances

    """
    def __init__(self, offline=False, name=None, vm_capable=True):
      self.name = name
      self.volumes = {}
      self.instances = []
      self.pinst = []
      self.sinst = []
      self.sbp = {}
      self.mfree = 0
      self.dfree = 0
      self.offline = offline
      self.vm_capable = vm_capable
      self.rpc_fail = False
      self.lvm_fail = False
      self.hyp_fail = False
      self.ghost = False
      self.os_fail = False
      self.oslist = {}

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    # Get instances in node group; this is unsafe and needs verification later
    inst_names = self.cfg.GetNodeGroupInstances(self.group_uuid)

    self.needed_locks = {
      locking.LEVEL_INSTANCE: inst_names,
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      locking.LEVEL_NODE: [],
      }

    self.share_locks = _ShareAll()

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # Get members of node group; this is unsafe and needs verification later
      nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)

      all_inst_info = self.cfg.GetAllInstancesInfo()

      # In Exec(), we warn about mirrored instances that have primary and
      # secondary living in separate node groups. To fully verify that
      # volumes for these instances are healthy, we will need to do an
      # extra call to their secondaries. We ensure here those nodes will
      # be locked.
      for inst in self.owned_locks(locking.LEVEL_INSTANCE):
        # Important: access only the instances whose lock is owned
        if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
          nodes.update(all_inst_info[inst].secondary_nodes)

      self.needed_locks[locking.LEVEL_NODE] = nodes

  def CheckPrereq(self):
    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
    self.group_info = self.cfg.GetNodeGroup(self.group_uuid)

    group_nodes = set(self.group_info.members)
    group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)

    unlocked_nodes = \
        group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))

    unlocked_instances = \
        group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))

    if unlocked_nodes:
      raise errors.OpPrereqError("Missing lock for nodes: %s" %
                                 utils.CommaJoin(unlocked_nodes))

    if unlocked_instances:
      raise errors.OpPrereqError("Missing lock for instances: %s" %
                                 utils.CommaJoin(unlocked_instances))

    self.all_node_info = self.cfg.GetAllNodesInfo()
    self.all_inst_info = self.cfg.GetAllInstancesInfo()

    self.my_node_names = utils.NiceSort(group_nodes)
    self.my_inst_names = utils.NiceSort(group_instances)

    self.my_node_info = dict((name, self.all_node_info[name])
                             for name in self.my_node_names)

    self.my_inst_info = dict((name, self.all_inst_info[name])
                             for name in self.my_inst_names)

    # We detect here the nodes that will need the extra RPC calls for verifying
    # split LV volumes; they should be locked.
    extra_lv_nodes = set()

    for inst in self.my_inst_info.values():
      if inst.disk_template in constants.DTS_INT_MIRROR:
        group = self.my_node_info[inst.primary_node].group
        for nname in inst.secondary_nodes:
          if self.all_node_info[nname].group != group:
            extra_lv_nodes.add(nname)

    unlocked_lv_nodes = \
        extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))

    if unlocked_lv_nodes:
      raise errors.OpPrereqError("these nodes could be locked: %s" %
                                 utils.CommaJoin(unlocked_lv_nodes))
    self.extra_lv_nodes = list(extra_lv_nodes)

  def _VerifyNode(self, ninfo, nresult):
    """Perform some basic validation on data returned from a node.

      - check the result data structure is well formed and has all the
        mandatory fields
      - check ganeti version

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the results from the node
    @rtype: boolean
    @return: whether overall this call was successful (and we can expect
         reasonable values in the response)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable=C0103

    # main result, nresult should be a non-empty dict
    test = not nresult or not isinstance(nresult, dict)
    _ErrorIf(test, self.ENODERPC, node,
             "unable to verify node: no data returned")
    if test:
      return False

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = nresult.get("version", None)
    test = not (remote_version and
                isinstance(remote_version, (list, tuple)) and
                len(remote_version) == 2)
    _ErrorIf(test, self.ENODERPC, node,
             "connection to node returned invalid data")
    if test:
      return False

    test = local_version != remote_version[0]
    _ErrorIf(test, self.ENODEVERSION, node,
             "incompatible protocol versions: master %s,"
             " node %s", local_version, remote_version[0])
    if test:
      return False

    # node seems compatible, we can actually try to look into its results

    # full package version
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
                  self.ENODEVERSION, node,
                  "software version mismatch: master %s, node %s",
                  constants.RELEASE_VERSION, remote_version[1],
                  code=self.ETYPE_WARNING)

    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
    if ninfo.vm_capable and isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        test = hv_result is not None
        _ErrorIf(test, self.ENODEHV, node,
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)

    hvp_result = nresult.get(constants.NV_HVPARAMS, None)
    if ninfo.vm_capable and isinstance(hvp_result, list):
      for item, hv_name, hv_result in hvp_result:
        _ErrorIf(True, self.ENODEHV, node,
                 "hypervisor %s parameter verify failure (source %s): %s",
                 hv_name, item, hv_result)

    test = nresult.get(constants.NV_NODESETUP,
                       ["Missing NODESETUP results"])
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
             "; ".join(test))

    return True

  def _VerifyNodeTime(self, ninfo, nresult,
                      nvinfo_starttime, nvinfo_endtime):
    """Check the node time.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nvinfo_starttime: the start time of the RPC call
    @param nvinfo_endtime: the end time of the RPC call

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable=C0103

    ntime = nresult.get(constants.NV_TIME, None)
    try:
      ntime_merged = utils.MergeTime(ntime)
    except (ValueError, TypeError):
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
      return

    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
    else:
      ntime_diff = None

    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
             "Node time diverges by at least %s from master node time",
             ntime_diff)
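
  # Worked example (illustrative): if the verify RPC started at t=1000.0 and
  # ended at t=1002.0, and NODE_MAX_CLOCK_SKEW allows e.g. 150 seconds of
  # drift, a node reporting a merged time of 820.0 is flagged as diverging
  # by at least 1000.0 - 820.0 = 180.0s, while anything within
  # [850.0, 1152.0] passes.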

  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
    """Check the node LVM results.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param vg_name: the configured VG name

    """
    if vg_name is None:
      return

    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable=C0103

    # checks vg existence and size > 20G
    vglist = nresult.get(constants.NV_VGLIST, None)
    test = not vglist
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
    if not test:
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                            constants.MIN_VG_SIZE)
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)

    # check pv names
    pvlist = nresult.get(constants.NV_PVLIST, None)
    test = pvlist is None
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
    if not test:
      # check that ':' is not present in PV names, since it's a
      # special character for lvcreate (denotes the range of PEs to
      # use on the PV)
      for _, pvname, owner_vg in pvlist:
        test = ":" in pvname
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
                 " '%s' of VG '%s'", pvname, owner_vg)

  def _VerifyNodeBridges(self, ninfo, nresult, bridges):
    """Check the node bridges.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param bridges: the expected list of bridges

    """
    if not bridges:
      return

    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable=C0103

    missing = nresult.get(constants.NV_BRIDGES, None)
    test = not isinstance(missing, list)
    _ErrorIf(test, self.ENODENET, node,
             "did not return valid bridge information")
    if not test:
      _ErrorIf(bool(missing), self.ENODENET, node, "missing bridges: %s" %
               utils.CommaJoin(sorted(missing)))

  def _VerifyNodeNetwork(self, ninfo, nresult):
    """Check the node network connectivity results.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable=C0103

    test = constants.NV_NODELIST not in nresult
    _ErrorIf(test, self.ENODESSH, node,
             "node hasn't returned node ssh connectivity data")
    if not test:
      if nresult[constants.NV_NODELIST]:
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
          _ErrorIf(True, self.ENODESSH, node,
                   "ssh communication with node '%s': %s", a_node, a_msg)

    test = constants.NV_NODENETTEST not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node tcp connectivity data")
    if not test:
      if nresult[constants.NV_NODENETTEST]:
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
        for anode in nlist:
          _ErrorIf(True, self.ENODENET, node,
                   "tcp communication with node '%s': %s",
                   anode, nresult[constants.NV_NODENETTEST][anode])

    test = constants.NV_MASTERIP not in nresult
    _ErrorIf(test, self.ENODENET, node,
             "node hasn't returned node master IP reachability data")
    if not test:
      if not nresult[constants.NV_MASTERIP]:
        if node == self.master_node:
          msg = "the master node cannot reach the master IP (not configured?)"
        else:
          msg = "cannot reach the master IP"
        _ErrorIf(True, self.ENODENET, node, msg)

  def _VerifyInstance(self, instance, instanceconfig, node_image,
                      diskstatus):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      n_img = node_image[node]
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # ignore missing volumes on offline or broken nodes
        continue
      for volume in node_vol_should[node]:
        test = volume not in n_img.volumes
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
                 "volume %s missing on node %s", volume, node)

    if instanceconfig.admin_up:
      pri_img = node_image[node_current]
      test = instance not in pri_img.instances and not pri_img.offline
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
               "instance not running on its primary node %s",
               node_current)

    diskdata = [(nname, success, status, idx)
                for (nname, disks) in diskstatus.items()
                for idx, (success, status) in enumerate(disks)]

    for nname, success, bdev_status, idx in diskdata:
      # the 'ghost node' construction in Exec() ensures that we have a
      # node here
      snode = node_image[nname]
      bad_snode = snode.ghost or snode.offline
      _ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
               self.EINSTANCEFAULTYDISK, instance,
               "couldn't retrieve status for disk/%s on %s: %s",
               idx, nname, bdev_status)
      _ErrorIf((instanceconfig.admin_up and success and
                bdev_status.ldisk_status == constants.LDS_FAULTY),
               self.EINSTANCEFAULTYDISK, instance,
               "disk/%s on %s is faulty", idx, nname)

  def _VerifyOrphanVolumes(self, node_vol_should, node_image, reserved):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    @type reserved: L{ganeti.utils.FieldSet}
    @param reserved: a FieldSet of reserved volume names

    """
    for node, n_img in node_image.items():
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
        # skip non-healthy nodes
        continue
      for volume in n_img.volumes:
        test = ((node not in node_vol_should or
                volume not in node_vol_should[node]) and
                not reserved.Matches(volume))
        self._ErrorIf(test, self.ENODEORPHANLV, node,
                      "volume %s is unknown", volume)

  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the
    instances it was primary for.

    """
    cluster_info = self.cfg.GetClusterInfo()
    for node, n_img in node_image.items():
      # This code checks that every node which is now listed as
      # secondary has enough memory to host all instances it is
      # supposed to should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well
      # as up ones, considering that even if they're down someone
      # might want to start them even in the event of a node failure.
      if n_img.offline:
        # we're skipping offline nodes from the N+1 warning, since
        # most likely we don't have good memory information from them;
        # we already list instances living on such nodes, and that's
        # enough warning
        continue
      for prinode, instances in n_img.sbp.items():
        needed_mem = 0
        for instance in instances:
          bep = cluster_info.FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        test = n_img.mfree < needed_mem
        self._ErrorIf(test, self.ENODEN1, node,
                      "not enough memory to accommodate instance failovers"
                      " should node %s fail (%dMiB needed, %dMiB available)",
                      prinode, needed_mem, n_img.mfree)
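
  # Numeric sketch (illustrative): if this node is secondary for two
  # auto-balanced instances of a given primary, configured with 2048 MiB and
  # 4096 MiB of memory, it needs at least 6144 MiB free to absorb a failover
  # of that primary; anything less triggers an ENODEN1 error.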

  @classmethod
  def _VerifyFiles(cls, errorif, nodeinfo, master_node, all_nvinfo,
                   (files_all, files_all_opt, files_mc, files_vm)):
    """Verifies file checksums collected from all nodes.

    @param errorif: Callback for reporting errors
    @param nodeinfo: List of L{objects.Node} objects
    @param master_node: Name of master node
    @param all_nvinfo: RPC results

    """
    node_names = frozenset(node.name for node in nodeinfo if not node.offline)

    assert master_node in node_names
    assert (len(files_all | files_all_opt | files_mc | files_vm) ==
            sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
           "Found file listed in more than one file list"

    # Define functions determining which nodes to consider for a file
    file2nodefn = dict([(filename, fn)
      for (files, fn) in [(files_all, None),
                          (files_all_opt, None),
                          (files_mc, lambda node: (node.master_candidate or
                                                   node.name == master_node)),
                          (files_vm, lambda node: node.vm_capable)]
      for filename in files])

    fileinfo = dict((filename, {}) for filename in file2nodefn.keys())

    for node in nodeinfo:
      if node.offline:
        continue

      nresult = all_nvinfo[node.name]

      if nresult.fail_msg or not nresult.payload:
        node_files = None
      else:
        node_files = nresult.payload.get(constants.NV_FILELIST, None)

      test = not (node_files and isinstance(node_files, dict))
      errorif(test, cls.ENODEFILECHECK, node.name,
              "Node did not return file checksum data")
      if test:
        continue

      for (filename, checksum) in node_files.items():
        # Check if the file should be considered for a node
        fn = file2nodefn[filename]
        if fn is None or fn(node):
          fileinfo[filename].setdefault(checksum, set()).add(node.name)

    for (filename, checksums) in fileinfo.items():
      assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"

      # Nodes having the file
      with_file = frozenset(node_name
                            for nodes in fileinfo[filename].values()
                            for node_name in nodes)

      # Nodes missing file
      missing_file = node_names - with_file

      if filename in files_all_opt:
        # All or no nodes
        errorif(missing_file and missing_file != node_names,
                cls.ECLUSTERFILECHECK, None,
                "File %s is optional, but it must exist on all or no"
                " nodes (not found on %s)",
                filename, utils.CommaJoin(utils.NiceSort(missing_file)))
      else:
        errorif(missing_file, cls.ECLUSTERFILECHECK, None,
                "File %s is missing from node(s) %s", filename,
                utils.CommaJoin(utils.NiceSort(missing_file)))

      # See if there are multiple versions of the file
      test = len(checksums) > 1
      if test:
        variants = ["variant %s on %s" %
                    (idx + 1, utils.CommaJoin(utils.NiceSort(nodes)))
                    for (idx, (checksum, nodes)) in
                      enumerate(sorted(checksums.items()))]
      else:
        variants = []

      errorif(test, cls.ECLUSTERFILECHECK, None,
              "File %s found with %s different checksums (%s)",
              filename, len(checksums), "; ".join(variants))

  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_helper,
                      drbd_map):
    """Verifies the node DRBD status.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param instanceinfo: the dict of instances
    @param drbd_helper: the configured DRBD usermode helper
    @param drbd_map: the DRBD map as returned by
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable=C0103

    if drbd_helper:
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
      test = (helper_result == None)
      _ErrorIf(test, self.ENODEDRBDHELPER, node,
               "no drbd usermode helper returned")
      if helper_result:
        status, payload = helper_result
        test = not status
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "drbd usermode helper check unsuccessful: %s", payload)
        test = status and (payload != drbd_helper)
        _ErrorIf(test, self.ENODEDRBDHELPER, node,
                 "wrong drbd usermode helper: %s", payload)

    # compute the DRBD minors
    node_drbd = {}
    for minor, instance in drbd_map[node].items():
      test = instance not in instanceinfo
      _ErrorIf(test, self.ECLUSTERCFG, None,
               "ghost instance '%s' in temporary DRBD map", instance)
        # ghost instance should not be running, but otherwise we
        # don't give double warnings (both ghost instance and
        # unallocated minor in use)
      if test:
        node_drbd[minor] = (instance, False)
      else:
        instance = instanceinfo[instance]
        node_drbd[minor] = (instance.name, instance.admin_up)

    # and now check them
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
    test = not isinstance(used_minors, (tuple, list))
    _ErrorIf(test, self.ENODEDRBD, node,
             "cannot parse drbd status file: %s", str(used_minors))
    if test:
      # we cannot check drbd status
      return

    for minor, (iname, must_exist) in node_drbd.items():
      test = minor not in used_minors and must_exist
      _ErrorIf(test, self.ENODEDRBD, node,
               "drbd minor %d of instance %s is not active", minor, iname)
    for minor in used_minors:
      test = minor not in node_drbd
      _ErrorIf(test, self.ENODEDRBD, node,
               "unallocated drbd minor %d is in use", minor)

  def _UpdateNodeOS(self, ninfo, nresult, nimg):
    """Builds the node OS structures.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable=C0103

    remote_os = nresult.get(constants.NV_OSLIST, None)
    test = (not isinstance(remote_os, list) or
            not compat.all(isinstance(v, list) and len(v) == 7
                           for v in remote_os))

    _ErrorIf(test, self.ENODEOS, node,
             "node hasn't returned valid OS data")

    nimg.os_fail = test

    if test:
      return

    os_dict = {}

    for (name, os_path, status, diagnose,
         variants, parameters, api_ver) in nresult[constants.NV_OSLIST]:

      if name not in os_dict:
        os_dict[name] = []

      # parameters is a list of lists instead of list of tuples due to
      # JSON lacking a real tuple type, fix it:
      parameters = [tuple(v) for v in parameters]
      os_dict[name].append((os_path, status, diagnose,
                            set(variants), set(parameters), set(api_ver)))

    nimg.oslist = os_dict

  def _VerifyNodeOS(self, ninfo, nimg, base):
    """Verifies the node OS list.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nimg: the node image object
    @param base: the 'template' node we match against (e.g. from the master)

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable=C0103

    assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"

    beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
    for os_name, os_data in nimg.oslist.items():
      assert os_data, "Empty OS status for OS %s?!" % os_name
      f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
      _ErrorIf(not f_status, self.ENODEOS, node,
               "Invalid OS %s (located at %s): %s", os_name, f_path, f_diag)
      _ErrorIf(len(os_data) > 1, self.ENODEOS, node,
               "OS '%s' has multiple entries (first one shadows the rest): %s",
               os_name, utils.CommaJoin([v[0] for v in os_data]))
      # comparisons with the 'base' image
      test = os_name not in base.oslist
      _ErrorIf(test, self.ENODEOS, node,
               "Extra OS %s not present on reference node (%s)",
               os_name, base.name)
      if test:
        continue
      assert base.oslist[os_name], "Base node has empty OS status?"
      _, b_status, _, b_var, b_param, b_api = base.oslist[os_name][0]
      if not b_status:
        # base OS is invalid, skipping
        continue
      for kind, a, b in [("API version", f_api, b_api),
                         ("variants list", f_var, b_var),
                         ("parameters", beautify_params(f_param),
                          beautify_params(b_param))]:
        _ErrorIf(a != b, self.ENODEOS, node,
                 "OS %s for %s differs from reference node %s: [%s] vs. [%s]",
                 kind, os_name, base.name,
                 utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))

    # check any missing OSes
    missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
    _ErrorIf(missing, self.ENODEOS, node,
             "OSes present on reference node %s but missing on this node: %s",
             base.name, utils.CommaJoin(missing))

  def _VerifyOob(self, ninfo, nresult):
    """Verifies out of band functionality of a node.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node

    """
    node = ninfo.name
    # We just have to verify the paths on master and/or master candidates
    # as the oob helper is invoked on the master
    if ((ninfo.master_candidate or ninfo.master_capable) and
        constants.NV_OOB_PATHS in nresult):
      for path_result in nresult[constants.NV_OOB_PATHS]:
        self._ErrorIf(path_result, self.ENODEOOBPATH, node, path_result)

  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
    """Verifies and updates the node volume data.

    This function will update a L{NodeImage}'s internal structures
    with data from the remote call.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable=C0103

    nimg.lvm_fail = True
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
    if vg_name is None:
      pass
    elif isinstance(lvdata, basestring):
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
               utils.SafeEncode(lvdata))
    elif not isinstance(lvdata, dict):
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
    else:
      nimg.volumes = lvdata
      nimg.lvm_fail = False

  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
    """Verifies and updates the node instance list.

    If the listing was successful, then updates this node's instance
    list. Otherwise, it marks the RPC call as failed for the instance
    list key.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object

    """
    idata = nresult.get(constants.NV_INSTANCELIST, None)
    test = not isinstance(idata, list)
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
    if test:
      nimg.hyp_fail = True
    else:
      nimg.instances = idata

  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
    """Verifies and computes a node information map.

    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name

    """
    node = ninfo.name
    _ErrorIf = self._ErrorIf # pylint: disable=C0103

    # try to read free memory (from the hypervisor)
    hv_info = nresult.get(constants.NV_HVINFO, None)
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
    if not test:
      try:
        nimg.mfree = int(hv_info["memory_free"])
      except (ValueError, TypeError):
        _ErrorIf(True, self.ENODERPC, node,
                 "node returned invalid nodeinfo, check hypervisor")

    # FIXME: devise a free space model for file based instances as well
    if vg_name is not None:
      test = (constants.NV_VGLIST not in nresult or
              vg_name not in nresult[constants.NV_VGLIST])
      _ErrorIf(test, self.ENODELVM, node,
               "node didn't return data for the volume group '%s'"
               " - it is either missing or broken", vg_name)
      if not test:
        try:
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
        except (ValueError, TypeError):
          _ErrorIf(True, self.ENODERPC, node,
                   "node returned invalid LVM info, check LVM status")

  def _CollectDiskInfo(self, nodelist, node_image, instanceinfo):
    """Gets per-disk status information for all instances.

    @type nodelist: list of strings
    @param nodelist: Node names
    @type node_image: dict of (name, L{objects.Node})
    @param node_image: Node objects
    @type instanceinfo: dict of (name, L{objects.Instance})
    @param instanceinfo: Instance objects
    @rtype: {instance: {node: [(success, payload)]}}
    @return: a dictionary of per-instance dictionaries with nodes as
        keys and disk information as values; the disk information is a
        list of tuples (success, payload)

    """
    _ErrorIf = self._ErrorIf # pylint: disable=C0103

    node_disks = {}
    node_disks_devonly = {}
    diskless_instances = set()
    diskless = constants.DT_DISKLESS

    for nname in nodelist:
      node_instances = list(itertools.chain(node_image[nname].pinst,
                                            node_image[nname].sinst))
      diskless_instances.update(inst for inst in node_instances
                                if instanceinfo[inst].disk_template == diskless)
      disks = [(inst, disk)
               for inst in node_instances
               for disk in instanceinfo[inst].disks]

      if not disks:
        # No need to collect data
        continue

      node_disks[nname] = disks

      # Creating copies as SetDiskID below will modify the objects and that can
      # lead to incorrect data returned from nodes
      devonly = [dev.Copy() for (_, dev) in disks]

      for dev in devonly:
        self.cfg.SetDiskID(dev, nname)

      node_disks_devonly[nname] = devonly

    assert len(node_disks) == len(node_disks_devonly)

    # Collect data from all nodes with disks
    result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
                                                          node_disks_devonly)

    assert len(result) == len(node_disks)

    instdisk = {}

    for (nname, nres) in result.items():
      disks = node_disks[nname]

      if nres.offline:
        # No data from this node
        data = len(disks) * [(False, "node offline")]
      else:
        msg = nres.fail_msg
        _ErrorIf(msg, self.ENODERPC, nname,
                 "while getting disk information: %s", msg)
        if msg:
          # No data from this node
          data = len(disks) * [(False, msg)]
        else:
          data = []
          for idx, i in enumerate(nres.payload):
            if isinstance(i, (tuple, list)) and len(i) == 2:
              data.append(i)
            else:
              logging.warning("Invalid result from node %s, entry %d: %s",
                              nname, idx, i)
              data.append((False, "Invalid result from the remote node"))

      for ((inst, _), status) in zip(disks, data):
        instdisk.setdefault(inst, {}).setdefault(nname, []).append(status)

    # Add empty entries for diskless instances.
    for inst in diskless_instances:
      assert inst not in instdisk
      instdisk[inst] = {}

    assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
                      len(nnames) <= len(instanceinfo[inst].all_nodes) and
                      compat.all(isinstance(s, (tuple, list)) and
                                 len(s) == 2 for s in statuses)
                      for inst, nnames in instdisk.items()
                      for nname, statuses in nnames.items())
    assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"

    return instdisk
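
  # Shape of the returned structure (illustrative, with hypothetical names):
  #
  #   {"inst1": {"node1": [(True, status0), (True, status1)],
  #              "node2": [(True, status0), (True, status1)]},
  #    "diskless-inst": {}}
  #
  # i.e. one (success, payload) tuple per disk, per node the instance spans;
  # diskless instances map to an empty dict.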

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase; their failure causes
    the output to be logged in the verify output and the verification to
    fail.

    """
    env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
      }

    env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
               for node in self.my_node_info.values())

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], self.my_node_names)

  def Exec(self, feedback_fn):
    """Verify integrity of the node group, performing various tests on nodes.

    """
    # This method has too many local variables. pylint: disable=R0914
    feedback_fn("* Verifying group '%s'" % self.group_info.name)

    if not self.my_node_names:
      # empty node group
      feedback_fn("* Empty node group, skipping verification")
      return True

    self.bad = False
    _ErrorIf = self._ErrorIf # pylint: disable=C0103
    verbose = self.op.verbose
    self._feedback_fn = feedback_fn

    vg_name = self.cfg.GetVGName()
    drbd_helper = self.cfg.GetDRBDHelper()
    cluster = self.cfg.GetClusterInfo()
    groupinfo = self.cfg.GetAllNodeGroupsInfo()
    hypervisors = cluster.enabled_hypervisors
    node_data_list = [self.my_node_info[name] for name in self.my_node_names]

    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = 0 # Count of offline nodes
    n_drained = 0 # Count of nodes being drained
    node_vol_should = {}

    # FIXME: verify OS list

    # File verification
    filemap = _ComputeAncillaryFiles(cluster, False)

    # do local checksums
    master_node = self.master_node = self.cfg.GetMasterNode()
    master_ip = self.cfg.GetMasterIP()

    feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_names))

    # We will make nodes contact all nodes in their group, and one node from
    # every other group.
    # TODO: should it be a *random* node, different every time?
    online_nodes = [node.name for node in node_data_list if not node.offline]
    other_group_nodes = {}

    for name in sorted(self.all_node_info):
      node = self.all_node_info[name]
      if (node.group not in other_group_nodes
          and node.group != self.group_uuid
          and not node.offline):
        other_group_nodes[node.group] = node.name

    node_verify_param = {
      constants.NV_FILELIST:
        utils.UniqueSequence(filename
                             for files in filemap
                             for filename in files),
      constants.NV_NODELIST: online_nodes + other_group_nodes.values(),
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_HVPARAMS:
        _GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
      constants.NV_NODENETTEST: [(node.name, node.primary_ip, node.secondary_ip)
                                 for node in node_data_list
                                 if not node.offline],
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_NODESETUP: None,
      constants.NV_TIME: None,
      constants.NV_MASTERIP: (master_node, master_ip),
      constants.NV_OSLIST: None,
      constants.NV_VMNODES: self.cfg.GetNonVmCapableNodeList(),
      }

    if vg_name is not None:
      node_verify_param[constants.NV_VGLIST] = None
      node_verify_param[constants.NV_LVLIST] = vg_name
      node_verify_param[constants.NV_PVLIST] = [vg_name]
      node_verify_param[constants.NV_DRBDLIST] = None

    if drbd_helper:
      node_verify_param[constants.NV_DRBDHELPER] = drbd_helper

    # bridge checks
    # FIXME: this needs to be changed per node-group, not cluster-wide
    bridges = set()
    default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
    if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      bridges.add(default_nicpp[constants.NIC_LINK])
    for instance in self.my_inst_info.values():
      for nic in instance.nics:
        full_nic = cluster.SimpleFillNIC(nic.nicparams)
        if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          bridges.add(full_nic[constants.NIC_LINK])

    if bridges:
      node_verify_param[constants.NV_BRIDGES] = list(bridges)

    # Build our expected cluster state
    node_image = dict((node.name, self.NodeImage(offline=node.offline,
                                                 name=node.name,
                                                 vm_capable=node.vm_capable))
                      for node in node_data_list)

    # Gather OOB paths
    oob_paths = []
    for node in self.all_node_info.values():
      path = _SupportsOob(self.cfg, node)
      if path and path not in oob_paths:
        oob_paths.append(path)

    if oob_paths:
      node_verify_param[constants.NV_OOB_PATHS] = oob_paths

    for instance in self.my_inst_names:
      inst_config = self.my_inst_info[instance]

      for nname in inst_config.all_nodes:
        if nname not in node_image:
          gnode = self.NodeImage(name=nname)
          gnode.ghost = (nname not in self.all_node_info)
          node_image[nname] = gnode

      inst_config.MapLVsByNode(node_vol_should)

      pnode = inst_config.primary_node
      node_image[pnode].pinst.append(instance)

      for snode in inst_config.secondary_nodes:
        nimg = node_image[snode]
        nimg.sinst.append(instance)
        if pnode not in nimg.sbp:
          nimg.sbp[pnode] = []
        nimg.sbp[pnode].append(instance)
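
    # After this loop each NodeImage carries the expected, config-derived view
    # of its node; for example (hypothetical names), a node that is secondary
    # for "inst1" and "inst2", both primary on "nodeA", ends up with
    # sinst == ["inst1", "inst2"] and sbp == {"nodeA": ["inst1", "inst2"]},
    # which is what the N+1 memory check later consumes.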
2703

    
2704
    # At this point, we have the in-memory data structures complete,
2705
    # except for the runtime information, which we'll gather next
2706

    
2707
    # Due to the way our RPC system works, exact response times cannot be
2708
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
2709
    # time before and after executing the request, we can at least have a time
2710
    # window.
2711
    nvinfo_starttime = time.time()
2712
    all_nvinfo = self.rpc.call_node_verify(self.my_node_names,
2713
                                           node_verify_param,
2714
                                           self.cfg.GetClusterName())
2715
    nvinfo_endtime = time.time()
2716

    
2717
    if self.extra_lv_nodes and vg_name is not None:
2718
      extra_lv_nvinfo = \
2719
          self.rpc.call_node_verify(self.extra_lv_nodes,
2720
                                    {constants.NV_LVLIST: vg_name},
2721
                                    self.cfg.GetClusterName())
2722
    else:
2723
      extra_lv_nvinfo = {}
2724

    
2725
    all_drbd_map = self.cfg.ComputeDRBDMap()
2726

    
2727
    feedback_fn("* Gathering disk information (%s nodes)" %
2728
                len(self.my_node_names))
2729
    instdisk = self._CollectDiskInfo(self.my_node_names, node_image,
2730
                                     self.my_inst_info)
2731

    
2732
    feedback_fn("* Verifying configuration file consistency")
2733

    
2734
    # If not all nodes are being checked, we need to make sure the master node
2735
    # and a non-checked vm_capable node are in the list.
2736
    absent_nodes = set(self.all_node_info).difference(self.my_node_info)
2737
    if absent_nodes:
2738
      vf_nvinfo = all_nvinfo.copy()
2739
      vf_node_info = list(self.my_node_info.values())
2740
      additional_nodes = []
2741
      if master_node not in self.my_node_info:
2742
        additional_nodes.append(master_node)
2743
        vf_node_info.append(self.all_node_info[master_node])
2744
      # Add the first vm_capable node we find which is not included
2745
      for node in absent_nodes:
2746
        nodeinfo = self.all_node_info[node]
2747
        if nodeinfo.vm_capable and not nodeinfo.offline:
2748
          additional_nodes.append(node)
2749
          vf_node_info.append(self.all_node_info[node])
2750
          break
2751
      key = constants.NV_FILELIST
2752
      vf_nvinfo.update(self.rpc.call_node_verify(additional_nodes,
2753
                                                 {key: node_verify_param[key]},
2754
                                                 self.cfg.GetClusterName()))
2755
    else:
2756
      vf_nvinfo = all_nvinfo
2757
      vf_node_info = self.my_node_info.values()
2758

    
2759
    self._VerifyFiles(_ErrorIf, vf_node_info, master_node, vf_nvinfo, filemap)
2760

    
2761
    feedback_fn("* Verifying node status")
2762

    
2763
    refos_img = None
2764

    
2765
    for node_i in node_data_list:
2766
      node = node_i.name
2767
      nimg = node_image[node]
2768

    
2769
      if node_i.offline:
2770
        if verbose:
2771
          feedback_fn("* Skipping offline node %s" % (node,))
2772
        n_offline += 1
2773
        continue
2774

    
2775
      if node == master_node:
2776
        ntype = "master"
2777
      elif node_i.master_candidate:
2778
        ntype = "master candidate"
2779
      elif node_i.drained:
2780
        ntype = "drained"
2781
        n_drained += 1
2782
      else:
2783
        ntype = "regular"
2784
      if verbose:
2785
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
2786

    
2787
      msg = all_nvinfo[node].fail_msg
2788
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
2789
      if msg:
2790
        nimg.rpc_fail = True
2791
        continue
2792

    
2793
      nresult = all_nvinfo[node].payload
2794

    
2795
      nimg.call_ok = self._VerifyNode(node_i, nresult)
2796
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
2797
      self._VerifyNodeNetwork(node_i, nresult)
2798
      self._VerifyOob(node_i, nresult)
2799

    
2800
      if nimg.vm_capable:
2801
        self._VerifyNodeLVM(node_i, nresult, vg_name)
2802
        self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
2803
                             all_drbd_map)
2804

    
2805
        self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
2806
        self._UpdateNodeInstances(node_i, nresult, nimg)
2807
        self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
2808
        self._UpdateNodeOS(node_i, nresult, nimg)
2809

    
2810
        if not nimg.os_fail:
2811
          if refos_img is None:
2812
            refos_img = nimg
2813
          self._VerifyNodeOS(node_i, nimg, refos_img)
2814
        self._VerifyNodeBridges(node_i, nresult, bridges)
2815

    
2816
        # Check whether all running instancies are primary for the node. (This
2817
        # can no longer be done from _VerifyInstance below, since some of the
2818
        # wrong instances could be from other node groups.)
        non_primary_inst = set(nimg.instances).difference(nimg.pinst)

        for inst in non_primary_inst:
          test = inst in self.all_inst_info
          _ErrorIf(test, self.EINSTANCEWRONGNODE, inst,
                   "instance should not run on node %s", node_i.name)
          _ErrorIf(not test, self.ENODEORPHANINSTANCE, node_i.name,
                   "node is running unknown instance %s", inst)

    for node, result in extra_lv_nvinfo.items():
      self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
                              node_image[node], vg_name)

    feedback_fn("* Verifying instance status")
    for instance in self.my_inst_names:
      if verbose:
        feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.my_inst_info[instance]
      self._VerifyInstance(instance, inst_config, node_image,
                           instdisk[instance])
      inst_nodes_offline = []

      pnode = inst_config.primary_node
      pnode_img = node_image[pnode]
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
               self.ENODERPC, pnode, "instance %s, connection to"
               " primary node failed", instance)

      _ErrorIf(inst_config.admin_up and pnode_img.offline,
               self.EINSTANCEBADNODE, instance,
               "instance is marked as running and lives on offline node %s",
               inst_config.primary_node)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if not inst_config.secondary_nodes:
        i_non_redundant.append(instance)

      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
               instance, "instance has multiple secondary nodes: %s",
               utils.CommaJoin(inst_config.secondary_nodes),
               code=self.ETYPE_WARNING)

      if inst_config.disk_template in constants.DTS_INT_MIRROR:
        pnode = inst_config.primary_node
        instance_nodes = utils.NiceSort(inst_config.all_nodes)
        instance_groups = {}

        for node in instance_nodes:
          instance_groups.setdefault(self.all_node_info[node].group,
                                     []).append(node)

        pretty_list = [
          "%s (group %s)" % (utils.CommaJoin(nodes), groupinfo[group].name)
          # Sort so that we always list the primary node first.
          for group, nodes in sorted(instance_groups.items(),
                                     key=lambda (_, nodes): pnode in nodes,
                                     reverse=True)]

        self._ErrorIf(len(instance_groups) > 1, self.EINSTANCESPLITGROUPS,
                      instance, "instance has primary and secondary nodes in"
                      " different groups: %s", utils.CommaJoin(pretty_list),
                      code=self.ETYPE_WARNING)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        s_img = node_image[snode]
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
                 "instance %s, connection to secondary node failed", instance)

        if s_img.offline:
          inst_nodes_offline.append(snode)

      # warn that the instance lives on offline nodes
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
               "instance has offline secondary node(s) %s",
               utils.CommaJoin(inst_nodes_offline))
      # ... or ghost/non-vm_capable nodes
      for node in inst_config.all_nodes:
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
                 "instance lives on ghost node %s", node)
        _ErrorIf(not node_image[node].vm_capable, self.EINSTANCEBADNODE,
                 instance, "instance lives on non-vm_capable node %s", node)

    feedback_fn("* Verifying orphan volumes")
    reserved = utils.FieldSet(*cluster.reserved_lvs)

    # We will get spurious "unknown volume" warnings if any node of this group
    # is secondary for an instance whose primary is in another group. To avoid
    # them, we find these instances and add their volumes to node_vol_should.
    for inst in self.all_inst_info.values():
      for secondary in inst.secondary_nodes:
        if (secondary in self.my_node_info
            and inst.name not in self.my_inst_info):
          inst.MapLVsByNode(node_vol_should)
          break

    self._VerifyOrphanVolumes(node_vol_should, node_image, reserved)

    if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
      feedback_fn("* Verifying N+1 Memory redundancy")
      self._VerifyNPlusOneMemory(node_image, self.my_inst_info)

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)

    if n_drained:
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)

    return not self.bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, only for non-empty groups,
    # and are only interested in their results
    if not self.my_node_names:
      # empty node group
      pass
    elif phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      feedback_fn("* Hooks Results")
      assert hooks_results, "invalid result from hooks"

      for node_name in hooks_results:
        res = hooks_results[node_name]
        msg = res.fail_msg
        test = msg and not res.offline
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
                      "Communication failure in hooks execution: %s", msg)
        if res.offline or msg:
          # No need to investigate payload if node is offline or gave an error.
          # override manually lu_result here as _ErrorIf only
          # overrides self.bad
          lu_result = 1
          continue
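        # each payload entry is expected to be a (script, status, output)
        # tuple from the node's hook runner, with constants.HKR_FAIL marking
        # a failed script (see the check below)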
        for script, hkr, output in res.payload:
          test = hkr == constants.HKR_FAIL
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
                        "Script %s failed, output:", script)
          if test:
            output = self._HOOKS_INDENT_RE.sub("      ", output)
            feedback_fn("%s" % output)
            lu_result = 0

    return lu_result


class LUClusterVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: locking.ALL_SET,
      }

  def Exec(self, feedback_fn):
    group_names = self.owned_locks(locking.LEVEL_NODEGROUP)

    # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
    return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
                           for group in group_names])


class LUGroupVerifyDisks(NoHooksLU):
  """Verifies the status of all disks in a node group.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # Raises errors.OpPrereqError on its own if group can't be found
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetNodeGroupInstances(self.group_uuid)

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        set([self.group_uuid] +
            # Lock all groups used by instances optimistically; this requires
            # going via the node before it's locked, requiring verification
            # later on
            [group_uuid
             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
             for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be verified which contain
      # actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be verified
      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
      member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)

  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)

    # Get instance information
    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))

    # Check if node groups for locked instances are still correct
    for (instance_name, inst) in self.instances.items():
      assert owned_nodes.issuperset(inst.all_nodes), \
        "Instance %s's nodes changed while we kept the lock" % instance_name

      inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
                                             owned_groups)

      assert self.group_uuid in inst_groups, \
        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

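    For example (shape only, the values are illustrative)::

      ({"node1.example.com": "error message"},
       ["instance1.example.com"],
       {"instance2.example.com": [("node2.example.com", "xenvg/disk0")]})
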
    """
    res_nodes = {}
    res_instances = set()
    res_missing = {}

    nv_dict = _MapInstanceDisksToNodes([inst
                                        for inst in self.instances.values()
                                        if inst.admin_up])
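    # nv_dict maps (node name, volume name) tuples to the owning instance;
    # only disks of instances marked as running (admin_up) are considered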

    if nv_dict:
      nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
                             set(self.cfg.GetVmCapableNodeList()))

      node_lvs = self.rpc.call_lv_list(nodes, [])

      for (node, node_res) in node_lvs.items():
        if node_res.offline:
          continue

        msg = node_res.fail_msg
        if msg:
          logging.warning("Error enumerating LVs on node %s: %s", node, msg)
          res_nodes[node] = msg
          continue

        for lv_name, (_, _, lv_online) in node_res.payload.items():
          inst = nv_dict.pop((node, lv_name), None)
          if not (lv_online or inst is None):
            res_instances.add(inst)

      # any leftover items in nv_dict are missing LVs, let's arrange the data
      # better
      for key, inst in nv_dict.iteritems():
        res_missing.setdefault(inst, []).append(key)

    return (res_nodes, list(res_instances), res_missing)


class LUClusterRepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.instances:
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = _ShareAll()

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)

    self.wanted_instances = \
        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
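      # for DRBD8 the first child is the data device and the second the
      # metadata device, so only the data child has to match the parent's
      # size (hence the "metadev" note below)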
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsize(node, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getsize call to node"
                        " %s, ignoring", node)
        continue
      if len(result.payload) != len(dskl):
        logging.warning("Invalid result from node %s: len(dskl)=%d,"
                        " result.payload=%s", node, len(dskl), result.payload)
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
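        # the RPC reports sizes in bytes while disk.size is kept in MiB,
        # hence the conversion below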
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, disk.size))
    return changed


class LUClusterRename(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = netutils.GetHostname(name=self.op.name,
                                    family=self.cfg.GetPrimaryIPFamily())

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed",
                                 errors.ECODE_INVAL)
    if new_ip != old_ip:
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network" %
                                   new_ip, errors.ECODE_NOTUNIQUE)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetOnlineNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      _UploadHelper(self, node_list, constants.SSH_KNOWN_HOSTS_FILE)
    finally:
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)

    return clustername


class LUClusterSetParams(LogicalUnit):
3328
  """Change the parameters of the cluster.
3329

3330
  """
3331
  HPATH = "cluster-modify"
3332
  HTYPE = constants.HTYPE_CLUSTER
3333
  REQ_BGL = False
3334

    
3335
  def CheckArguments(self):
3336
    """Check parameters
3337

3338
    """
3339
    if self.op.uid_pool:
3340
      uidpool.CheckUidPool(self.op.uid_pool)
3341

    
3342
    if self.op.add_uids:
3343
      uidpool.CheckUidPool(self.op.add_uids)
3344

    
3345
    if self.op.remove_uids:
3346
      uidpool.CheckUidPool(self.op.remove_uids)
3347

    
3348
  def ExpandNames(self):
3349
    # FIXME: in the future maybe other cluster params won't require checking on
3350
    # all nodes to be modified.
3351
    self.needed_locks = {
3352
      locking.LEVEL_NODE: locking.ALL_SET,
3353
    }
3354
    self.share_locks[locking.LEVEL_NODE] = 1
3355

    
3356
  def BuildHooksEnv(self):
3357
    """Build hooks env.
3358

3359
    """
3360
    return {
3361
      "OP_TARGET": self.cfg.GetClusterName(),
3362
      "NEW_VG_NAME": self.op.vg_name,
3363
      }
3364

    
3365
  def BuildHooksNodes(self):
3366
    """Build hooks nodes.
3367

3368
    """
3369
    mn = self.cfg.GetMasterNode()
3370
    return ([mn], [mn])
3371

    
3372
  def CheckPrereq(self):
3373
    """Check prerequisites.
3374

3375
    This checks whether the given params don't conflict and
3376
    if the given volume group is valid.
3377

3378
    """
3379
    if self.op.vg_name is not None and not self.op.vg_name:
3380
      if self.cfg.HasAnyDiskOfType(constants.LD_LV):
3381
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
3382
                                   " instances exist", errors.ECODE_INVAL)
3383

    
3384
    if self.op.drbd_helper is not None and not self.op.drbd_helper:
3385
      if self.cfg.HasAnyDiskOfType(constants.LD_DRBD8):
3386
        raise errors.OpPrereqError("Cannot disable drbd helper while"
3387
                                   " drbd-based instances exist",
3388
                                   errors.ECODE_INVAL)
3389

    
3390
    node_list = self.owned_locks(locking.LEVEL_NODE)
3391

    
3392
    # if vg_name not None, checks given volume group on all nodes
3393
    if self.op.vg_name:
3394
      vglist = self.rpc.call_vg_list(node_list)
3395
      for node in node_list:
3396
        msg = vglist[node].fail_msg
3397
        if msg:
3398
          # ignoring down node
3399
          self.LogWarning("Error while gathering data on node %s"
3400
                          " (ignoring node): %s", node, msg)
3401
          continue
3402
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
3403
                                              self.op.vg_name,
3404
                                              constants.MIN_VG_SIZE)
3405
        if vgstatus:
3406
          raise errors.OpPrereqError("Error on node '%s': %s" %
3407
                                     (node, vgstatus), errors.ECODE_ENVIRON)
3408

    
3409
    if self.op.drbd_helper:
3410
      # checks given drbd helper on all nodes
3411
      helpers = self.rpc.call_drbd_helper(node_list)
3412
      for (node, ninfo) in self.cfg.GetMultiNodeInfo(node_list):
3413
        if ninfo.offline:
3414
          self.LogInfo("Not checking drbd helper on offline node %s", node)
3415
          continue
3416
        msg = helpers[node].fail_msg
3417
        if msg:
3418
          raise errors.OpPrereqError("Error checking drbd helper on node"
3419
                                     " '%s': %s" % (node, msg),
3420
                                     errors.ECODE_ENVIRON)
3421
        node_helper = helpers[node].payload
3422
        if node_helper != self.op.drbd_helper:
3423
          raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
3424
                                     (node, node_helper), errors.ECODE_ENVIRON)
3425

    
3426
    self.cluster = cluster = self.cfg.GetClusterInfo()
3427
    # validate params changes
3428
    if self.op.beparams:
3429
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
3430
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)
3431

    
3432
    if self.op.ndparams:
3433
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
3434
      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)
3435

    
3436
      # TODO: we need a more general way to handle resetting
3437
      # cluster-level parameters to default values
3438
      if self.new_ndparams["oob_program"] == "":
3439
        self.new_ndparams["oob_program"] = \
3440
            constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
3441

    
3442
    if self.op.nicparams:
3443
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
3444
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
3445
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
3446
      nic_errors = []
3447

    
3448
      # check all instances for consistency
3449
      for instance in self.cfg.GetAllInstancesInfo().values():
3450
        for nic_idx, nic in enumerate(instance.nics):
3451
          params_copy = copy.deepcopy(nic.nicparams)
3452
          params_filled = objects.FillDict(self.new_nicparams, params_copy)
3453

    
3454
          # check parameter syntax
3455
          try:
3456
            objects.NIC.CheckParameterSyntax(params_filled)
3457
          except errors.ConfigurationError, err:
3458
            nic_errors.append("Instance %s, nic/%d: %s" %
3459
                              (instance.name, nic_idx, err))
3460

    
3461
          # if we're moving instances to routed, check that they have an ip
3462
          target_mode = params_filled[constants.NIC_MODE]
3463
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
3464
            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
3465
                              " address" % (instance.name, nic_idx))
3466
      if nic_errors:
3467
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
3468
                                   "\n".join(nic_errors))
3469

    
3470
    # hypervisor list/parameters
3471
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
3472
    if self.op.hvparams:
3473
      for hv_name, hv_dict in self.op.hvparams.items():
3474
        if hv_name not in self.new_hvparams:
3475
          self.new_hvparams[hv_name] = hv_dict
3476
        else:
3477
          self.new_hvparams[hv_name].update(hv_dict)
3478

    
3479
    # os hypervisor parameters
3480
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
3481
    if self.op.os_hvp:
3482
      for os_name, hvs in self.op.os_hvp.items():
3483
        if os_name not in self.new_os_hvp:
3484
          self.new_os_hvp[os_name] = hvs
3485
        else:
3486
          for hv_name, hv_dict in hvs.items():
3487
            if hv_name not in self.new_os_hvp[os_name]:
3488
              self.new_os_hvp[os_name][hv_name] = hv_dict
3489
            else:
3490
              self.new_os_hvp[os_name][hv_name].update(hv_dict)
3491

    
3492
    # os parameters
3493
    self.new_osp = objects.FillDict(cluster.osparams, {})
3494
    if self.op.osparams:
3495
      for os_name, osp in self.op.osparams.items():
3496
        if os_name not in self.new_osp:
3497
          self.new_osp[os_name] = {}
3498

    
3499
        self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
3500
                                                  use_none=True)
3501

    
3502
        if not self.new_osp[os_name]:
3503
          # we removed all parameters
3504
          del self.new_osp[os_name]
3505
        else:
3506
          # check the parameter validity (remote check)
3507
          _CheckOSParams(self, False, [self.cfg.GetMasterNode()],
3508
                         os_name, self.new_osp[os_name])
3509

    
3510
    # changes to the hypervisor list
3511
    if self.op.enabled_hypervisors is not None:
3512
      self.hv_list = self.op.enabled_hypervisors
3513
      for hv in self.hv_list:
3514
        # if the hypervisor doesn't already exist in the cluster
3515
        # hvparams, we initialize it to empty, and then (in both
3516
        # cases) we make sure to fill the defaults, as we might not
3517
        # have a complete defaults list if the hypervisor wasn't
3518
        # enabled before
3519
        if hv not in new_hvp:
3520
          new_hvp[hv] = {}
3521
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
3522
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
3523
    else:
3524
      self.hv_list = cluster.enabled_hypervisors
3525

    
3526
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
3527
      # either the enabled list has changed, or the parameters have, validate
3528
      for hv_name, hv_params in self.new_hvparams.items():
3529
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
3530
            (self.op.enabled_hypervisors and
3531
             hv_name in self.op.enabled_hypervisors)):
3532
          # either this is a new hypervisor, or its parameters have changed
3533
          hv_class = hypervisor.GetHypervisor(hv_name)
3534
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
3535
          hv_class.CheckParameterSyntax(hv_params)
3536
          _CheckHVParams(self, node_list, hv_name, hv_params)
3537

    
3538
    if self.op.os_hvp:
3539
      # no need to check any newly-enabled hypervisors, since the
3540
      # defaults have already been checked in the above code-block
3541
      for os_name, os_hvp in self.new_os_hvp.items():
3542
        for hv_name, hv_params in os_hvp.items():
3543
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
3544
          # we need to fill in the new os_hvp on top of the actual hv_p
3545
          cluster_defaults = self.new_hvparams.get(hv_name, {})
3546
          new_osp = objects.FillDict(cluster_defaults, hv_params)
3547
          hv_class = hypervisor.GetHypervisor(hv_name)
3548
          hv_class.CheckParameterSyntax(new_osp)
3549
          _CheckHVParams(self, node_list, hv_name, new_osp)
3550

    
3551
    if self.op.default_iallocator:
3552
      alloc_script = utils.FindFile(self.op.default_iallocator,
3553
                                    constants.IALLOCATOR_SEARCH_PATH,
3554
                                    os.path.isfile)
3555
      if alloc_script is None:
3556
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
3557
                                   " specified" % self.op.default_iallocator,
3558
                                   errors.ECODE_INVAL)
3559

    
3560
  def Exec(self, feedback_fn):
3561
    """Change the parameters of the cluster.
3562

3563
    """
3564
    if self.op.vg_name is not None:
3565
      new_volume = self.op.vg_name
3566
      if not new_volume:
3567
        new_volume = None
3568
      if new_volume != self.cfg.GetVGName():
3569
        self.cfg.SetVGName(new_volume)
3570
      else:
3571
        feedback_fn("Cluster LVM configuration already in desired"
3572
                    " state, not changing")
3573
    if self.op.drbd_helper is not None:
3574
      new_helper = self.op.drbd_helper
3575
      if not new_helper:
3576
        new_helper = None
3577
      if new_helper != self.cfg.GetDRBDHelper():
3578
        self.cfg.SetDRBDHelper(new_helper)
3579
      else:
3580
        feedback_fn("Cluster DRBD helper already in desired state,"
3581
                    " not changing")
3582
    if self.op.hvparams:
3583
      self.cluster.hvparams = self.new_hvparams
3584
    if self.op.os_hvp:
3585
      self.cluster.os_hvp = self.new_os_hvp
3586
    if self.op.enabled_hypervisors is not None:
3587
      self.cluster.hvparams = self.new_hvparams
3588
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
3589
    if self.op.beparams:
3590
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
3591
    if self.op.nicparams:
3592
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
3593
    if self.op.osparams:
3594
      self.cluster.osparams = self.new_osp
3595
    if self.op.ndparams:
3596
      self.cluster.ndparams = self.new_ndparams
3597

    
3598
    if self.op.candidate_pool_size is not None:
3599
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
3600
      # we need to update the pool size here, otherwise the save will fail
3601
      _AdjustCandidatePool(self, [])
3602

    
3603
    if self.op.maintain_node_health is not None:
3604
      self.cluster.maintain_node_health = self.op.maintain_node_health
3605

    
3606
    if self.op.prealloc_wipe_disks is not None:
3607
      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks
3608

    
3609
    if self.op.add_uids is not None:
3610
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
3611

    
3612
    if self.op.remove_uids is not None:
3613
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
3614

    
3615
    if self.op.uid_pool is not None:
3616
      self.cluster.uid_pool = self.op.uid_pool
3617

    
3618
    if self.op.default_iallocator is not None:
3619
      self.cluster.default_iallocator = self.op.default_iallocator
3620

    
3621
    if self.op.reserved_lvs is not None:
3622
      self.cluster.reserved_lvs = self.op.reserved_lvs
3623

    
3624
    def helper_os(aname, mods, desc):
3625
      desc += " OS list"
3626
      lst = getattr(self.cluster, aname)
3627
      for key, val in mods:
3628
        if key == constants.DDM_ADD:
3629
          if val in lst:
3630
            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
3631
          else:
3632
            lst.append(val)
3633
        elif key == constants.DDM_REMOVE:
3634
          if val in lst:
3635
            lst.remove(val)
3636
          else:
3637
            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
3638
        else:
3639
          raise errors.ProgrammerError("Invalid modification '%s'" % key)
3640

    
3641
    if self.op.hidden_os:
3642
      helper_os("hidden_os", self.op.hidden_os, "hidden")
3643

    
3644
    if self.op.blacklisted_os:
3645
      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")
3646

    
3647
    if self.op.master_netdev:
3648
      master = self.cfg.GetMasterNode()
3649
      feedback_fn("Shutting down master ip on the current netdev (%s)" %
3650
                  self.cluster.master_netdev)
3651
      result = self.rpc.call_node_stop_master(master, False)
3652
      result.Raise("Could not disable the master ip")
3653
      feedback_fn("Changing master_netdev from %s to %s" %
3654
                  (self.cluster.master_netdev, self.op.master_netdev))
3655
      self.cluster.master_netdev = self.op.master_netdev
3656

    
3657
    self.cfg.Update(self.cluster, feedback_fn)
3658

    
3659
    if self.op.master_netdev:
3660
      feedback_fn("Starting the master ip on the new master netdev (%s)" %
3661
                  self.op.master_netdev)
3662
      result = self.rpc.call_node_start_master(master, False, False)
3663
      if result.fail_msg:
3664
        self.LogWarning("Could not re-enable the master ip on"
3665
                        " the master, please restart manually: %s",
3666
                        result.fail_msg)
3667

    
3668

    
3669
def _UploadHelper(lu, nodes, fname):
3670
  """Helper for uploading a file and showing warnings.
3671

3672
  """
3673
  if os.path.exists(fname):
3674
    result = lu.rpc.call_upload_file(nodes, fname)
3675
    for to_node, to_result in result.items():
3676
      msg = to_result.fail_msg
3677
      if msg:
3678
        msg = ("Copy of file %s to node %s failed: %s" %
3679
               (fname, to_node, msg))
3680
        lu.proc.LogWarning(msg)
3681

    
3682

    
3683
def _ComputeAncillaryFiles(cluster, redist):
  """Compute files external to Ganeti which need to be consistent.

  @type redist: boolean
  @param redist: Whether to include files which need to be redistributed
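  @rtype: tuple of four sets
  @return: a tuple (files_all, files_all_opt, files_mc, files_vm): file names
      required on all nodes, file names which may exist on all nodes or on
      none, file names for master candidates only, and file names for
      VM-capable nodes only (summary derived from the code below)
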
  """
3690
  # Compute files for all nodes
3691
  files_all = set([
3692
    constants.SSH_KNOWN_HOSTS_FILE,
3693
    constants.CONFD_HMAC_KEY,
3694
    constants.CLUSTER_DOMAIN_SECRET_FILE,
3695
    ])
3696

    
3697
  if not redist:
3698
    files_all.update(constants.ALL_CERT_FILES)
3699
    files_all.update(ssconf.SimpleStore().GetFileList())
3700

    
3701
  if cluster.modify_etc_hosts:
3702
    files_all.add(constants.ETC_HOSTS)
3703

    
3704
  # Files which must either exist on all nodes or on none
3705
  files_all_opt = set([
3706
    constants.RAPI_USERS_FILE,
3707
    ])
3708

    
3709
  # Files which should only be on master candidates
3710
  files_mc = set()
3711
  if not redist:
3712
    files_mc.add(constants.CLUSTER_CONF_FILE)
3713

    
3714
  # Files which should only be on VM-capable nodes
3715
  files_vm = set(filename
3716
    for hv_name in cluster.enabled_hypervisors
3717
    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles())
3718

    
3719
  # Filenames must be unique
3720
  assert (len(files_all | files_all_opt | files_mc | files_vm) ==
3721
          sum(map(len, [files_all, files_all_opt, files_mc, files_vm]))), \
3722
         "Found file listed in more than one file list"
3723

    
3724
  return (files_all, files_all_opt, files_mc, files_vm)
3725

    
3726

    
3727
def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
3728
  """Distribute additional files which are part of the cluster configuration.
3729

3730
  ConfigWriter takes care of distributing the config and ssconf files, but
3731
  there are more files which should be distributed to all nodes. This function
3732
  makes sure those are copied.
3733

3734
  @param lu: calling logical unit
3735
  @param additional_nodes: list of nodes not in the config to distribute to
3736
  @type additional_vm: boolean
3737
  @param additional_vm: whether the additional nodes are vm-capable or not
3738

3739
  """
3740
  # Gather target nodes
3741
  cluster = lu.cfg.GetClusterInfo()
3742
  master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
3743

    
3744
  online_nodes = lu.cfg.GetOnlineNodeList()
3745
  vm_nodes = lu.cfg.GetVmCapableNodeList()
3746

    
3747
  if additional_nodes is not None:
3748
    online_nodes.extend(additional_nodes)
3749
    if additional_vm:
3750
      vm_nodes.extend(additional_nodes)
3751

    
3752
  # Never distribute to master node
3753
  for nodelist in [online_nodes, vm_nodes]:
3754
    if master_info.name in nodelist:
3755
      nodelist.remove(master_info.name)
3756

    
3757
  # Gather file lists
3758
  (files_all, files_all_opt, files_mc, files_vm) = \
3759
    _ComputeAncillaryFiles(cluster, True)
3760

    
3761
  # Never re-distribute configuration file from here
3762
  assert not (constants.CLUSTER_CONF_FILE in files_all or
3763
              constants.CLUSTER_CONF_FILE in files_vm)
3764
  assert not files_mc, "Master candidates not handled in this function"
3765

    
3766
  filemap = [
3767
    (online_nodes, files_all),
3768
    (online_nodes, files_all_opt),
3769
    (vm_nodes, files_vm),
3770
    ]
3771

    
3772
  # Upload the files
3773
  for (node_list, files) in filemap:
3774
    for fname in files:
3775
      _UploadHelper(lu, node_list, fname)
3776

    
3777

    
3778
class LUClusterRedistConf(NoHooksLU):
3779
  """Force the redistribution of cluster configuration.
3780

3781
  This is a very simple LU.
3782

3783
  """
3784
  REQ_BGL = False
3785

    
3786
  def ExpandNames(self):
3787
    self.needed_locks = {
3788
      locking.LEVEL_NODE: locking.ALL_SET,
3789
    }
3790
    self.share_locks[locking.LEVEL_NODE] = 1
3791

    
3792
  def Exec(self, feedback_fn):
3793
    """Redistribute the configuration.
3794

3795
    """
3796
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
3797
    _RedistributeAncillaryFiles(self)
3798

    
3799

    
3800
def _WaitForSync(lu, instance, disks=None, oneshot=False):
3801
  """Sleep and poll for an instance's disk to sync.
3802

3803
  """
3804
  if not instance.disks or disks is not None and not disks:
3805
    return True
3806

    
3807
  disks = _ExpandCheckDisks(instance, disks)
3808

    
3809
  if not oneshot:
3810
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
3811

    
3812
  node = instance.primary_node
3813

    
3814
  for dev in disks:
3815
    lu.cfg.SetDiskID(dev, node)
3816

    
3817
  # TODO: Convert to utils.Retry
3818

    
3819
  retries = 0
3820
  degr_retries = 10 # in seconds, as we sleep 1 second each time
3821
  while True:
3822
    max_time = 0
3823
    done = True
3824
    cumul_degraded = False
3825
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, disks)
3826
    msg = rstats.fail_msg
3827
    if msg:
3828
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
3829
      retries += 1
3830
      if retries >= 10:
3831
        raise errors.RemoteError("Can't contact node %s for mirror data,"
3832
                                 " aborting." % node)
3833
      time.sleep(6)
3834
      continue
3835
    rstats = rstats.payload
3836
    retries = 0
3837
    for i, mstat in enumerate(rstats):
3838
      if mstat is None:
3839
        lu.LogWarning("Can't compute data for node %s/%s",
3840
                           node, disks[i].iv_name)
3841
        continue
3842

    
3843
      cumul_degraded = (cumul_degraded or
3844
                        (mstat.is_degraded and mstat.sync_percent is None))
3845
      if mstat.sync_percent is not None:
3846
        done = False
3847
        if mstat.estimated_time is not None:
3848
          rem_time = ("%s remaining (estimated)" %
3849
                      utils.FormatSeconds(mstat.estimated_time))
3850
          max_time = mstat.estimated_time
3851
        else:
3852
          rem_time = "no time estimate"
3853
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
3854
                        (disks[i].iv_name, mstat.sync_percent, rem_time))
3855

    
3856
    # if we're done but degraded, let's do a few small retries, to
3857
    # make sure we see a stable and not transient situation; therefore
3858
    # we force restart of the loop
3859
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
3860
      logging.info("Degraded disks found, %d retries left", degr_retries)
3861
      degr_retries -= 1
3862
      time.sleep(1)
3863
      continue
3864

    
3865
    if done or oneshot:
3866
      break
3867

    
3868
    time.sleep(min(60, max_time))
3869

    
3870
  if done:
3871
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
3872
  return not cumul_degraded
3873

    
3874

    
3875
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
3876
  """Check that mirrors are not degraded.
3877

3878
  The ldisk parameter, if True, will change the test from the
3879
  is_degraded attribute (which represents overall non-ok status for
3880
  the device(s)) to the ldisk (representing the local storage status).
3881

3882
  """
3883
  lu.cfg.SetDiskID(dev, node)
3884

    
3885
  result = True
3886

    
3887
  if on_primary or dev.AssembleOnSecondary():
3888
    rstats = lu.rpc.call_blockdev_find(node, dev)
3889
    msg = rstats.fail_msg
3890
    if msg:
3891
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
3892
      result = False
3893
    elif not rstats.payload:
3894
      lu.LogWarning("Can't find disk on node %s", node)
3895
      result = False
3896
    else:
3897
      if ldisk:
3898
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
3899
      else:
3900
        result = result and not rstats.payload.is_degraded
3901

    
3902
  if dev.children:
3903
    for child in dev.children:
3904
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
3905

    
3906
  return result
3907

    
3908

    
3909
class LUOobCommand(NoHooksLU):
3910
  """Logical unit for OOB handling.
3911

3912
  """
3913
  REG_BGL = False
3914
  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
3915

    
3916
  def ExpandNames(self):
3917
    """Gather locks we need.
3918

3919
    """
3920
    if self.op.node_names:
3921
      self.op.node_names = _GetWantedNodes(self, self.op.node_names)
3922
      lock_names = self.op.node_names
3923
    else:
3924
      lock_names = locking.ALL_SET
3925

    
3926
    self.needed_locks = {
3927
      locking.LEVEL_NODE: lock_names,
3928
      }
3929

    
3930
  def CheckPrereq(self):
3931
    """Check prerequisites.
3932

3933
    This checks:
3934
     - the node exists in the configuration
3935
     - OOB is supported
3936

3937
    Any errors are signaled by raising errors.OpPrereqError.
3938

3939
    """
3940
    self.nodes = []
3941
    self.master_node = self.cfg.GetMasterNode()
3942

    
3943
    assert self.op.power_delay >= 0.0
3944

    
3945
    if self.op.node_names:
3946
      if (self.op.command in self._SKIP_MASTER and
3947
          self.master_node in self.op.node_names):
3948
        master_node_obj = self.cfg.GetNodeInfo(self.master_node)
3949
        master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
3950

    
3951
        if master_oob_handler:
3952
          additional_text = ("run '%s %s %s' if you want to operate on the"
3953
                             " master regardless") % (master_oob_handler,
3954
                                                      self.op.command,
3955
                                                      self.master_node)
3956
        else:
3957
          additional_text = "it does not support out-of-band operations"
3958

    
3959
        raise errors.OpPrereqError(("Operating on the master node %s is not"
3960
                                    " allowed for %s; %s") %
3961
                                   (self.master_node, self.op.command,
3962
                                    additional_text), errors.ECODE_INVAL)
3963
    else:
3964
      self.op.node_names = self.cfg.GetNodeList()
3965
      if self.op.command in self._SKIP_MASTER:
3966
        self.op.node_names.remove(self.master_node)
3967

    
3968
    if self.op.command in self._SKIP_MASTER:
3969
      assert self.master_node not in self.op.node_names
3970

    
3971
    for (node_name, node) in self.cfg.GetMultiNodeInfo(self.op.node_names):
3972
      if node is None:
3973
        raise errors.OpPrereqError("Node %s not found" % node_name,
3974
                                   errors.ECODE_NOENT)
3975
      else:
3976
        self.nodes.append(node)
3977

    
3978
      if (not self.op.ignore_status and
3979
          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
3980
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
3981
                                    " not marked offline") % node_name,
3982
                                   errors.ECODE_STATE)
3983

    
3984
  def Exec(self, feedback_fn):
3985
    """Execute OOB and return result if we expect any.
3986

3987
    """
3988
    master_node = self.master_node
3989
    ret = []
3990

    
3991
    for idx, node in enumerate(utils.NiceSort(self.nodes,
3992
                                              key=lambda node: node.name)):
3993
      node_entry = [(constants.RS_NORMAL, node.name)]
3994
      ret.append(node_entry)
3995

    
3996
      oob_program = _SupportsOob(self.cfg, node)
3997

    
3998
      if not oob_program:
3999
        node_entry.append((constants.RS_UNAVAIL, None))
4000
        continue
4001

    
4002
      logging.info("Executing out-of-band command '%s' using '%s' on %s",
4003
                   self.op.command, oob_program, node.name)
4004
      result = self.rpc.call_run_oob(master_node, oob_program,
4005
                                     self.op.command, node.name,
4006
                                     self.op.timeout)
4007

    
4008
      if result.fail_msg:
4009
        self.LogWarning("Out-of-band RPC failed on node '%s': %s",
4010
                        node.name, result.fail_msg)
4011
        node_entry.append((constants.RS_NODATA, None))
4012
      else:
4013
        try:
4014
          self._CheckPayload(result)
4015
        except errors.OpExecError, err:
4016
          self.LogWarning("Payload returned by node '%s' is not valid: %s",
4017
                          node.name, err)
4018
          node_entry.append((constants.RS_NODATA, None))
4019
        else:
4020
          if self.op.command == constants.OOB_HEALTH:
4021
            # For health we should log important events
4022
            for item, status in result.payload:
4023
              if status in [constants.OOB_STATUS_WARNING,
4024
                            constants.OOB_STATUS_CRITICAL]:
4025
                self.LogWarning("Item '%s' on node '%s' has status '%s'",
4026
                                item, node.name, status)
4027

    
4028
          if self.op.command == constants.OOB_POWER_ON:
4029
            node.powered = True
4030
          elif self.op.command == constants.OOB_POWER_OFF:
4031
            node.powered = False
4032
          elif self.op.command == constants.OOB_POWER_STATUS:
4033
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
4034
            if powered != node.powered:
4035
              logging.warning(("Recorded power state (%s) of node '%s' does not"
4036
                               " match actual power state (%s)"), node.powered,
4037
                              node.name, powered)
4038

    
4039
          # For configuration changing commands we should update the node
4040
          if self.op.command in (constants.OOB_POWER_ON,
4041
                                 constants.OOB_POWER_OFF):
4042
            self.cfg.Update(node, feedback_fn)
4043

    
4044
          node_entry.append((constants.RS_NORMAL, result.payload))
4045

    
4046
          if (self.op.command == constants.OOB_POWER_ON and
4047
              idx < len(self.nodes) - 1):
4048
            time.sleep(self.op.power_delay)
4049

    
4050
    return ret
4051

    
4052
  def _CheckPayload(self, result):
    """Checks if the payload is valid.

    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid
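
    For example (illustrative shapes only): a valid "health" payload is a
    list of (item, status) pairs such as [("PSU0", "OK")], "power-status" is
    expected to return a dict, and the power commands should return no
    payload at all.
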
    """
4059
    errs = []
4060
    if self.op.command == constants.OOB_HEALTH:
4061
      if not isinstance(result.payload, list):
4062
        errs.append("command 'health' is expected to return a list but got %s" %
4063
                    type(result.payload))
4064
      else:
4065
        for item, status in result.payload:
4066
          if status not in constants.OOB_STATUSES:
4067
            errs.append("health item '%s' has invalid status '%s'" %
4068
                        (item, status))
4069

    
4070
    if self.op.command == constants.OOB_POWER_STATUS:
4071
      if not isinstance(result.payload, dict):
4072
        errs.append("power-status is expected to return a dict but got %s" %
4073
                    type(result.payload))
4074

    
4075
    if self.op.command in [
4076
        constants.OOB_POWER_ON,
4077
        constants.OOB_POWER_OFF,
4078
        constants.OOB_POWER_CYCLE,
4079
        ]:
4080
      if result.payload is not None:
4081
        errs.append("%s is expected to not return payload but got '%s'" %
4082
                    (self.op.command, result.payload))
4083

    
4084
    if errs:
4085
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
4086
                               utils.CommaJoin(errs))
4087

    
4088

    
4089
class _OsQuery(_QueryBase):
4090
  FIELDS = query.OS_FIELDS
4091

    
4092
  def ExpandNames(self, lu):
4093
    # Lock all nodes in shared mode
4094
    # Temporary removal of locks, should be reverted later
4095
    # TODO: reintroduce locks when they are lighter-weight
4096
    lu.needed_locks = {}
4097
    #self.share_locks[locking.LEVEL_NODE] = 1
4098
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4099

    
4100
    # The following variables interact with _QueryBase._GetNames
4101
    if self.names:
4102
      self.wanted = self.names
4103
    else:
4104
      self.wanted = locking.ALL_SET
4105

    
4106
    self.do_locking = self.use_locking
4107

    
4108
  def DeclareLocks(self, lu, level):
4109
    pass
4110

    
4111
  @staticmethod
4112
  def _DiagnoseByOS(rlist):
    """Remaps a per-node return list into a per-os per-node dictionary
4114

4115
    @param rlist: a map with node names as keys and OS objects as values
4116

4117
    @rtype: dict
4118
    @return: a dictionary with osnames as keys and as value another
4119
        map, with nodes as keys and tuples of (path, status, diagnose,
4120
        variants, parameters, api_versions) as values, eg::
4121

4122
          {"debian-etch": {"node1": [(/usr/lib/..., True, "", [], []),
4123
                                     (/srv/..., False, "invalid api")],
4124
                           "node2": [(/srv/..., True, "", [], [])]}
4125
          }
4126

4127
    """
4128
    all_os = {}
4129
    # we build here the list of nodes that didn't fail the RPC (at RPC
4130
    # level), so that nodes with a non-responding node daemon don't
4131
    # make all OSes invalid
4132
    good_nodes = [node_name for node_name in rlist
4133
                  if not rlist[node_name].fail_msg]
4134
    for node_name, nr in rlist.items():
4135
      if nr.fail_msg or not nr.payload:
4136
        continue
4137
      for (name, path, status, diagnose, variants,
4138
           params, api_versions) in nr.payload:
4139
        if name not in all_os:
4140
          # build a list of nodes for this os containing empty lists
4141
          # for each node in node_list
4142
          all_os[name] = {}
4143
          for nname in good_nodes:
4144
            all_os[name][nname] = []
4145
        # convert params from [name, help] to (name, help)
4146
        params = [tuple(v) for v in params]
4147
        all_os[name][node_name].append((path, status, diagnose,
4148
                                        variants, params, api_versions))
4149
    return all_os
4150

    
4151
  def _GetQueryData(self, lu):
4152
    """Computes the list of nodes and their attributes.
4153

4154
    """
4155
    # Locking is not used
4156
    assert not (compat.any(lu.glm.is_owned(level)
4157
                           for level in locking.LEVELS
4158
                           if level != locking.LEVEL_CLUSTER) or
4159
                self.do_locking or self.use_locking)
4160

    
4161
    valid_nodes = [node.name
4162
                   for node in lu.cfg.GetAllNodesInfo().values()
4163
                   if not node.offline and node.vm_capable]
4164
    pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_nodes))
4165
    cluster = lu.cfg.GetClusterInfo()
4166

    
4167
    data = {}
4168

    
4169
    for (os_name, os_data) in pol.items():
4170
      info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
4171
                          hidden=(os_name in cluster.hidden_os),
4172
                          blacklisted=(os_name in cluster.blacklisted_os))
4173

    
4174
      variants = set()
4175
      parameters = set()
4176
      api_versions = set()
4177

    
4178
      for idx, osl in enumerate(os_data.values()):
4179
        info.valid = bool(info.valid and osl and osl[0][1])
4180
        if not info.valid:
4181
          break
4182

    
4183
        (node_variants, node_params, node_api) = osl[0][3:6]
4184
        if idx == 0:
4185
          # First entry
4186
          variants.update(node_variants)
4187
          parameters.update(node_params)
4188
          api_versions.update(node_api)
4189
        else:
4190
          # Filter out inconsistent values
4191
          variants.intersection_update(node_variants)
4192
          parameters.intersection_update(node_params)
4193
          api_versions.intersection_update(node_api)
4194

    
4195
      info.variants = list(variants)
4196
      info.parameters = list(parameters)
4197
      info.api_versions = list(api_versions)
4198

    
4199
      data[os_name] = info
4200

    
4201
    # Prepare data in requested order
4202
    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
4203
            if name in data]
4204

    
4205

    
4206
class LUOsDiagnose(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  REQ_BGL = False

  @staticmethod
  def _BuildFilter(fields, names):
    """Builds a filter for querying OSes.

    """
    name_filter = qlang.MakeSimpleFilter("name", names)

    # Legacy behaviour: Hide hidden, blacklisted or invalid OSes if the
    # respective field is not requested
    status_filter = [[qlang.OP_NOT, [qlang.OP_TRUE, fname]]
                     for fname in ["hidden", "blacklisted"]
                     if fname not in fields]
    if "valid" not in fields:
      status_filter.append([qlang.OP_TRUE, "valid"])

    if status_filter:
      status_filter.insert(0, qlang.OP_AND)
    else:
      status_filter = None

    if name_filter and status_filter:
      return [qlang.OP_AND, name_filter, status_filter]
    elif name_filter:
      return name_filter
    else:
      return status_filter

  def CheckArguments(self):
    self.oq = _OsQuery(self._BuildFilter(self.op.output_fields, self.op.names),
                       self.op.output_fields, False)

  def ExpandNames(self):
    self.oq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.oq.OldStyleQuery(self)


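# Illustrative note (addition, not part of the original file): for a query
# where only "name" is requested and no OS names are given, _BuildFilter
# above returns a status-only filter roughly of the form
#   [qlang.OP_AND,
#    [qlang.OP_NOT, [qlang.OP_TRUE, "hidden"]],
#    [qlang.OP_NOT, [qlang.OP_TRUE, "blacklisted"]],
#    [qlang.OP_TRUE, "valid"]]
# i.e. hidden, blacklisted and invalid OSes are excluded unless the caller
# explicitly asked for those fields.

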
class LUNodeRemove(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    return {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    all_nodes = self.cfg.GetNodeList()
    try:
      all_nodes.remove(self.op.node_name)
    except ValueError:
      logging.warning("Node '%s', which is about to be removed, was not found"
                      " in the list of all nodes", self.op.node_name)
    return (all_nodes, all_nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
    node = self.cfg.GetNodeInfo(self.op.node_name)
    assert node is not None

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node, failover to another"
                                 " node is required", errors.ECODE_INVAL)

    for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first" % instance_name,
                                   errors.ECODE_INVAL)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self, exceptions=[node.name])
    self.context.RemoveNode(node.name)

    # Run post hooks on the node before it's removed
    _RunPostHook(self, node.name)

    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Errors encountered on the remote node while leaving"
                      " the cluster: %s", msg)

    # Remove node from our /etc/hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_REMOVE,
                                              node.name, None)
      result.Raise("Can't update hosts file with new host data")
      _RedistributeAncillaryFiles(self)


class _NodeQuery(_QueryBase):
  FIELDS = query.NODE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = _ShareAll()

    if self.names:
      self.wanted = _GetWantedNodes(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.NQ_LIVE in self.requested_data)

    if self.do_locking:
      # If any non-static field is requested we need to lock the nodes
      lu.needed_locks[locking.LEVEL_NODE] = self.wanted

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of nodes and their attributes.

    """
    all_info = lu.cfg.GetAllNodesInfo()

    nodenames = self._GetNames(lu, all_info.keys(), locking.LEVEL_NODE)

    # Gather data as requested
    if query.NQ_LIVE in self.requested_data:
      # filter out non-vm_capable nodes
      toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]

      node_data = lu.rpc.call_node_info(toquery_nodes, lu.cfg.GetVGName(),
                                        lu.cfg.GetHypervisorType())
      live_data = dict((name, nresult.payload)
                       for (name, nresult) in node_data.items()
                       if not nresult.fail_msg and nresult.payload)
    else:
      live_data = None

    if query.NQ_INST in self.requested_data:
      node_to_primary = dict([(name, set()) for name in nodenames])
      node_to_secondary = dict([(name, set()) for name in nodenames])

      inst_data = lu.cfg.GetAllInstancesInfo()

      for inst in inst_data.values():
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)
    else:
      node_to_primary = None
      node_to_secondary = None

    if query.NQ_OOB in self.requested_data:
      oob_support = dict((name, bool(_SupportsOob(lu.cfg, node)))
                         for name, node in all_info.iteritems())
    else:
      oob_support = None

    if query.NQ_GROUP in self.requested_data:
      groups = lu.cfg.GetAllNodeGroupsInfo()
    else:
      groups = {}

    return query.NodeQueryData([all_info[name] for name in nodenames],
                               live_data, lu.cfg.GetMasterNode(),
                               node_to_primary, node_to_secondary, groups,
                               oob_support, lu.cfg.GetClusterInfo())


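# Illustrative note (addition, not part of the original file): _NodeQuery only
# acquires per-node locks when use_locking was requested and a live field
# (query.NQ_LIVE) is among the requested data; a purely static query such as
# ["name", "pinst_cnt"] is answered from the configuration alone, without node
# locks or per-node RPC round-trips.

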
class LUNodeQuery(NoHooksLU):
  """Logical unit for querying nodes.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.nq = _NodeQuery(qlang.MakeSimpleFilter("name", self.op.names),
                         self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.nq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.nq.OldStyleQuery(self)


class LUNodeQueryvols(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.owned_locks(locking.LEVEL_NODE)
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = self.cfg.GetAllInstancesInfo()
    vol2inst = _MapInstanceDisksToNodes(ilist.values())

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      node_vols = sorted(nresult.payload,
                         key=operator.itemgetter("dev"))

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol["dev"]
          elif field == "vg":
            val = vol["vg"]
          elif field == "name":
            val = vol["name"]
          elif field == "size":
            val = int(float(vol["size"]))
          elif field == "instance":
            val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-")
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


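# Illustrative note (addition, not part of the original file; values below are
# hypothetical): with output_fields ["node", "phys", "name", "size",
# "instance"], each row produced by LUNodeQueryvols.Exec is a list of strings
# along the lines of
#   ["node1.example.com", "/dev/sdb1", "<disk-uuid>.disk0", "10240",
#    "instance1.example.com"]
# where volumes not belonging to any instance get "-" in the instance column.

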
class LUNodeQueryStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
  REQ_BGL = False

  def CheckArguments(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

  def ExpandNames(self):
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    self.nodes = self.owned_locks(locking.LEVEL_NODE)

    # Always get name to sort by
    if constants.SF_NAME in self.op.output_fields:
      fields = self.op.output_fields[:]
    else:
      fields = [constants.SF_NAME] + self.op.output_fields

    # Never ask for node or type as it's only known to the LU
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
      while extra in fields:
        fields.remove(extra)

    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      rows = dict([(row[name_idx], row) for row in nresult.payload])

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]

        out = []

        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)

          out.append(val)

        result.append(out)

    return result


class _InstanceQuery(_QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = _ShareAll()

    if self.names:
      self.wanted = _GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

  def DeclareLocks(self, lu, level):
    if self.do_locking:
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
          set(group_uuid
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
      elif level == locking.LEVEL_NODE:
        lu._LockInstancesNodes() # pylint: disable=W0212

  @staticmethod
  def _CheckGroupLocks(lu):
    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instances:
      _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    all_info = lu.cfg.GetAllInstancesInfo()

    instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)

    instance_list = [all_info[name] for name in instance_names]
    nodes = frozenset(itertools.chain(*(inst.all_nodes
                                        for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_nodes = []
    offline_nodes = []
    wrongnode_inst = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if inst in all_info:
              if all_info[inst].primary_node == name:
                live_data.update(result.payload)
              else:
                wrongnode_inst.add(inst)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst, name)
        # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      disk_usage = dict((inst.name,
                         _ComputeDiskSize(inst.disk_template,
                                          [{constants.IDISK_SIZE: disk.size}
                                           for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

    if query.IQ_NODES in self.requested_data:
      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
                                            instance_list)))
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_nodes, bad_nodes,
                                   live_data, wrongnode_inst, consinfo,
                                   nodes, groups)


class LUQuery(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    qcls = _GetQueryImplementation(self.op.what)

    self.impl = qcls(self.op.filter, self.op.fields, self.op.use_locking)

  def ExpandNames(self):
    self.impl.ExpandNames(self)

  def DeclareLocks(self, level):
    self.impl.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.impl.NewStyleQuery(self)


class LUQueryFields(NoHooksLU):
  """Query for resources/items of a certain kind.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.qcls = _GetQueryImplementation(self.op.what)

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    return query.QueryFields(self.qcls.FIELDS, self.op.fields)


class LUNodeModifyStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    storage_type = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % storage_type,
                                 errors.ECODE_INVAL)

    diff = set(self.op.changes.keys()) - modifiable
    if diff:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (storage_type, list(diff)),
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def Exec(self, feedback_fn):
    """Modifies a storage unit on the given node.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))


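# Illustrative note (addition, not part of the original file): in practice the
# main modifiable storage field is the "allocatable" flag of LVM physical
# volumes, so a typical opcode carries a changes dictionary along the lines of
#   {constants.SF_ALLOCATABLE: False}
# for storage_type constants.ST_LVM_PV, which is roughly what
# "gnt-node modify-storage --allocatable=no <node> lvm-pv <device>" submits.

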
class LUNodeAdd(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _NFLAGS = ["master_capable", "vm_capable"]

  def CheckArguments(self):
    self.primary_ip_family = self.cfg.GetPrimaryIPFamily()
    # validate/normalize the node name
    self.hostname = netutils.GetHostname(name=self.op.node_name,
                                         family=self.primary_ip_family)
    self.op.node_name = self.hostname.name

    if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
      raise errors.OpPrereqError("Cannot readd the master node",
                                 errors.ECODE_STATE)

    if self.op.readd and self.op.group:
      raise errors.OpPrereqError("Cannot pass a node group when a node is"
                                 " being readded", errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    return {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      "MASTER_CAPABLE": str(self.op.master_capable),
      "VM_CAPABLE": str(self.op.vm_capable),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    # Exclude added node
    pre_nodes = list(set(self.cfg.GetNodeList()) - set([self.op.node_name]))
    post_nodes = pre_nodes + [self.op.node_name, ]

    return (pre_nodes, post_nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    cfg = self.cfg
    hostname = self.hostname
    node = hostname.name
    primary_ip = self.op.primary_ip = hostname.ip
    if self.op.secondary_ip is None:
      if self.primary_ip_family == netutils.IP6Address.family:
        raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                   " IPv4 address must be given as secondary",
                                   errors.ECODE_INVAL)
      self.op.secondary_ip = primary_ip

    secondary_ip = self.op.secondary_ip
    if not netutils.IP4Address.IsValid(secondary_ip):
      raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
                                 " address" % secondary_ip, errors.ECODE_INVAL)

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node, errors.ECODE_EXISTS)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
                                 errors.ECODE_NOENT)

    self.changed_primary_ip = False

    for existing_node_name, existing_node in cfg.GetMultiNodeInfo(node_list):
      if self.op.readd and node == existing_node_name:
        if existing_node.secondary_ip != secondary_ip:
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before",
                                     errors.ECODE_INVAL)
        if existing_node.primary_ip != primary_ip:
          self.changed_primary_ip = True

        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name,
                                   errors.ECODE_NOTUNIQUE)

    # After this 'if' block, None is no longer a valid value for the
    # _capable op attributes
    if self.op.readd:
      old_node = self.cfg.GetNodeInfo(node)
      assert old_node is not None, "Can't retrieve locked node %s" % node
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, getattr(old_node, attr))
    else:
      for attr in self._NFLAGS:
        if getattr(self.op, attr) is None:
          setattr(self.op, attr, True)

    if self.op.readd and not self.op.vm_capable:
      pri, sec = cfg.GetNodeInstances(node)
      if pri or sec:
        raise errors.OpPrereqError("Node %s being re-added with vm_capable"
                                   " flag set to false, but it already holds"
                                   " instances" % node,
                                   errors.ECODE_STATE)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no secondary ip but the"
                                   " new node has one",
                                   errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("The master has a secondary ip but the"
                                   " new node doesn't have one",
                                   errors.ECODE_INVAL)

    # checks reachability
    if not netutils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping",
                                 errors.ECODE_ENVIRON)

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                              source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to node daemon port",
                                   errors.ECODE_ENVIRON)

    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    if self.op.master_capable:
      self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
    else:
      self.master_candidate = False

    if self.op.readd:
      self.new_node = old_node
    else:
      node_group = cfg.LookupNodeGroup(self.op.group)
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False,
                                   group=node_group)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # We are adding a new node, so we assume it is powered
    new_node.powered = True

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False # pylint: disable=W0201
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate
      if self.changed_primary_ip:
        new_node.primary_ip = self.op.primary_ip

    # copy the master/vm_capable flags
    for attr in self._NFLAGS:
      setattr(new_node, attr, getattr(self.op, attr))

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    if self.op.ndparams:
      new_node.ndparams = self.op.ndparams
    else:
      new_node.ndparams = {}

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      master_node = self.cfg.GetMasterNode()
      result = self.rpc.call_etc_hosts_modify(master_node,
                                              constants.ETC_HOSTS_ADD,
                                              self.hostname.name,
                                              self.hostname.ip)
      result.Raise("Can't update hosts file with new host data")

    if new_node.secondary_ip != new_node.primary_ip:
      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
                               False)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node],
                                  additional_vm=self.op.vm_capable)
      self.context.AddNode(new_node, self.proc.GetECId())


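# Illustrative note (addition, not part of the original file; hostname and IP
# below are made up): CheckPrereq above requires a new node to be homed the
# same way as the master, so on a dual-homed cluster the secondary address has
# to be supplied explicitly, e.g. roughly
#   gnt-node add -s 192.0.2.10 node4.example.com
# whereas on a single-homed cluster the secondary IP silently defaults to the
# primary one.

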
class LUNodeSetParams(LogicalUnit):
5074
  """Modifies the parameters of a node.
5075

5076
  @cvar _F2R: a dictionary from tuples of flags (mc, drained, offline)
5077
      to the node role (as _ROLE_*)
5078
  @cvar _R2F: a dictionary from node role to tuples of flags
5079
  @cvar _FLAGS: a list of attribute names corresponding to the flags
5080

5081
  """
5082
  HPATH = "node-modify"
5083
  HTYPE = constants.HTYPE_NODE
5084
  REQ_BGL = False
5085
  (_ROLE_CANDIDATE, _ROLE_DRAINED, _ROLE_OFFLINE, _ROLE_REGULAR) = range(4)
5086
  _F2R = {
5087
    (True, False, False): _ROLE_CANDIDATE,
5088
    (False, True, False): _ROLE_DRAINED,
5089
    (False, False, True): _ROLE_OFFLINE,
5090
    (False, False, False): _ROLE_REGULAR,
5091
    }
5092
  _R2F = dict((v, k) for k, v in _F2R.items())
5093
  _FLAGS = ["master_candidate", "drained", "offline"]
5094

    
5095
  def CheckArguments(self):
5096
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5097
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
5098
                self.op.master_capable, self.op.vm_capable,
5099
                self.op.secondary_ip, self.op.ndparams]
5100
    if all_mods.count(None) == len(all_mods):
5101
      raise errors.OpPrereqError("Please pass at least one modification",
5102
                                 errors.ECODE_INVAL)
5103
    if all_mods.count(True) > 1:
5104
      raise errors.OpPrereqError("Can't set the node into more than one"
5105
                                 " state at the same time",
5106
                                 errors.ECODE_INVAL)
5107

    
5108
    # Boolean value that tells us whether we might be demoting from MC
5109
    self.might_demote = (self.op.master_candidate == False or
5110
                         self.op.offline == True or
5111
                         self.op.drained == True or
5112
                         self.op.master_capable == False)
5113

    
5114
    if self.op.secondary_ip:
5115
      if not netutils.IP4Address.IsValid(self.op.secondary_ip):
5116
        raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
5117
                                   " address" % self.op.secondary_ip,
5118
                                   errors.ECODE_INVAL)
5119

    
5120
    self.lock_all = self.op.auto_promote and self.might_demote
5121
    self.lock_instances = self.op.secondary_ip is not None
5122

    
5123
  def ExpandNames(self):
5124
    if self.lock_all:
5125
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
5126
    else:
5127
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
5128

    
5129
    if self.lock_instances:
5130
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
5131

    
5132
  def DeclareLocks(self, level):
5133
    # If we have locked all instances, before waiting to lock nodes, release
5134
    # all the ones living on nodes unrelated to the current operation.
5135
    if level == locking.LEVEL_NODE and self.lock_instances:
5136
      self.affected_instances = []
5137
      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
5138
        instances_keep = []
5139

    
5140
        # Build list of instances to release
5141
        locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
5142
        for instance_name, instance in self.cfg.GetMultiInstanceInfo(locked_i):
5143
          if (instance.disk_template in constants.DTS_INT_MIRROR and
5144
              self.op.node_name in instance.all_nodes):
5145
            instances_keep.append(instance_name)
5146
            self.affected_instances.append(instance)
5147

    
5148
        _ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
5149

    
5150
        assert (set(self.owned_locks(locking.LEVEL_INSTANCE)) ==
5151
                set(instances_keep))
5152

    
5153
  def BuildHooksEnv(self):
5154
    """Build hooks env.
5155

5156
    This runs on the master node.
5157

5158
    """
5159
    return {
5160
      "OP_TARGET": self.op.node_name,
5161
      "MASTER_CANDIDATE": str(self.op.master_candidate),
5162
      "OFFLINE": str(self.op.offline),
5163
      "DRAINED": str(self.op.drained),
5164
      "MASTER_CAPABLE": str(self.op.master_capable),
5165
      "VM_CAPABLE": str(self.op.vm_capable),
5166
      }
5167

    
5168
  def BuildHooksNodes(self):
5169
    """Build hooks nodes.
5170

5171
    """
5172
    nl = [self.cfg.GetMasterNode(), self.op.node_name]
5173
    return (nl, nl)
5174

    
5175
  def CheckPrereq(self):
5176
    """Check prerequisites.
5177

5178
    This only checks the instance list against the existing names.
5179

5180
    """
5181
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
5182

    
5183
    if (self.op.master_candidate is not None or
5184
        self.op.drained is not None or
5185
        self.op.offline is not None):
5186
      # we can't change the master's node flags
5187
      if self.op.node_name == self.cfg.GetMasterNode():
5188
        raise errors.OpPrereqError("The master role can be changed"
5189
                                   " only via master-failover",
5190
                                   errors.ECODE_INVAL)
5191

    
5192
    if self.op.master_candidate and not node.master_capable:
5193
      raise errors.OpPrereqError("Node %s is not master capable, cannot make"
5194
                                 " it a master candidate" % node.name,
5195
                                 errors.ECODE_STATE)
5196

    
5197
    if self.op.vm_capable == False:
5198
      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
5199
      if ipri or isec:
5200
        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
5201
                                   " the vm_capable flag" % node.name,
5202
                                   errors.ECODE_STATE)
5203

    
5204
    if node.master_candidate and self.might_demote and not self.lock_all:
5205
      assert not self.op.auto_promote, "auto_promote set but lock_all not"
5206
      # check if after removing the current node, we're missing master
5207
      # candidates
5208
      (mc_remaining, mc_should, _) = \
5209
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
5210
      if mc_remaining < mc_should:
5211
        raise errors.OpPrereqError("Not enough master candidates, please"
5212
                                   " pass auto promote option to allow"
5213
                                   " promotion", errors.ECODE_STATE)
5214

    
5215
    self.old_flags = old_flags = (node.master_candidate,
5216
                                  node.drained, node.offline)
5217
    assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
5218
    self.old_role = old_role = self._F2R[old_flags]
5219

    
5220
    # Check for ineffective changes
5221
    for attr in self._FLAGS:
5222
      if (getattr(self.op, attr) == False and getattr(node, attr) == False):
5223
        self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
5224
        setattr(self.op, attr, None)
5225

    
5226
    # Past this point, any flag change to False means a transition
5227
    # away from the respective state, as only real changes are kept
5228

    
5229
    # TODO: We might query the real power state if it supports OOB
5230
    if _SupportsOob(self.cfg, node):
5231
      if self.op.offline is False and not (node.powered or
5232
                                           self.op.powered == True):
5233
        raise errors.OpPrereqError(("Node %s needs to be turned on before its"
5234
                                    " offline status can be reset") %
5235
                                   self.op.node_name)
5236
    elif self.op.powered is not None:
5237
      raise errors.OpPrereqError(("Unable to change powered state for node %s"
5238
                                  " as it does not support out-of-band"
5239
                                  " handling") % self.op.node_name)
5240

    
5241
    # If we're being deofflined/drained, we'll MC ourself if needed
5242
    if (self.op.drained == False or self.op.offline == False or
5243
        (self.op.master_capable and not node.master_capable)):
5244
      if _DecideSelfPromotion(self):
5245
        self.op.master_candidate = True
5246
        self.LogInfo("Auto-promoting node to master candidate")
5247

    
5248
    # If we're no longer master capable, we'll demote ourselves from MC
5249
    if self.op.master_capable == False and node.master_candidate:
5250
      self.LogInfo("Demoting from master candidate")
5251
      self.op.master_candidate = False
5252

    
5253
    # Compute new role
5254
    assert [getattr(self.op, attr) for attr in self._FLAGS].count(True) <= 1
5255
    if self.op.master_candidate:
5256
      new_role = self._ROLE_CANDIDATE
5257
    elif self.op.drained:
5258
      new_role = self._ROLE_DRAINED
5259
    elif self.op.offline:
5260
      new_role = self._ROLE_OFFLINE
5261
    elif False in [self.op.master_candidate, self.op.drained, self.op.offline]:
5262
      # False is still in new flags, which means we're un-setting (the
5263
      # only) True flag
5264
      new_role = self._ROLE_REGULAR
5265
    else: # no new flags, nothing, keep old role
5266
      new_role = old_role
5267

    
5268
    self.new_role = new_role
5269

    
5270
    if old_role == self._ROLE_OFFLINE and new_role != old_role:
5271
      # Trying to transition out of offline status
5272
      result = self.rpc.call_version([node.name])[node.name]
5273
      if result.fail_msg:
5274
        raise errors.OpPrereqError("Node %s is being de-offlined but fails"
5275
                                   " to report its version: %s" %
5276
                                   (node.name, result.fail_msg),
5277
                                   errors.ECODE_STATE)
5278
      else:
5279
        self.LogWarning("Transitioning node from offline to online state"
5280
                        " without using re-add. Please make sure the node"
5281
                        " is healthy!")
5282

    
5283
    if self.op.secondary_ip:
5284
      # Ok even without locking, because this can't be changed by any LU
5285
      master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
5286
      master_singlehomed = master.secondary_ip == master.primary_ip
5287
      if master_singlehomed and self.op.secondary_ip:
5288
        raise errors.OpPrereqError("Cannot change the secondary ip on a single"
5289
                                   " homed cluster", errors.ECODE_INVAL)
5290

    
5291
      if node.offline:
5292
        if self.affected_instances:
5293
          raise errors.OpPrereqError("Cannot change secondary ip: offline"
5294
                                     " node has instances (%s) configured"
5295
                                     " to use it" % self.affected_instances)
5296
      else:
5297
        # On online nodes, check that no instances are running, and that
5298
        # the node has the new ip and we can reach it.
5299
        for instance in self.affected_instances:
5300
          _CheckInstanceDown(self, instance, "cannot change secondary ip")
5301

    
5302
        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
5303
        if master.name != node.name:
5304
          # check reachability from master secondary ip to new secondary ip
5305
          if not netutils.TcpPing(self.op.secondary_ip,
5306
                                  constants.DEFAULT_NODED_PORT,
5307
                                  source=master.secondary_ip):
5308
            raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
5309
                                       " based ping to node daemon port",
5310
                                       errors.ECODE_ENVIRON)
5311

    
5312
    if self.op.ndparams:
5313
      new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
5314
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
5315
      self.new_ndparams = new_ndparams
5316

    
5317
  def Exec(self, feedback_fn):
5318
    """Modifies a node.
5319

5320
    """
5321
    node = self.node
5322
    old_role = self.old_role
5323
    new_role = self.new_role
5324

    
5325
    result = []
5326

    
5327
    if self.op.ndparams:
5328
      node.ndparams = self.new_ndparams
5329

    
5330
    if self.op.powered is not None:
5331
      node.powered = self.op.powered
5332

    
5333
    for attr in ["master_capable", "vm_capable"]:
5334
      val = getattr(self.op, attr)
5335
      if val is not None:
5336
        setattr(node, attr, val)
5337
        result.append((attr, str(val)))
5338

    
5339
    if new_role != old_role:
5340
      # Tell the node to demote itself, if no longer MC and not offline
5341
      if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
5342
        msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
5343
        if msg:
5344
          self.LogWarning("Node failed to demote itself: %s", msg)
5345

    
5346
      new_flags = self._R2F[new_role]
5347
      for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
5348
        if of != nf:
5349
          result.append((desc, str(nf)))
5350
      (node.master_candidate, node.drained, node.offline) = new_flags
5351

    
5352
      # we locked all nodes, we adjust the CP before updating this node
5353
      if self.lock_all:
5354
        _AdjustCandidatePool(self, [node.name])
5355

    
5356
    if self.op.secondary_ip:
5357
      node.secondary_ip = self.op.secondary_ip
5358
      result.append(("secondary_ip", self.op.secondary_ip))
5359

    
5360
    # this will trigger configuration file update, if needed
5361
    self.cfg.Update(node, feedback_fn)
5362

    
5363
    # this will trigger job queue propagation or cleanup if the mc
5364
    # flag changed
5365
    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
5366
      self.context.ReaddNode(node)
5367

    
5368
    return result
5369

    
5370

    
5371
class LUNodePowercycle(NoHooksLU):
5372
  """Powercycles a node.
5373

5374
  """
5375
  REQ_BGL = False
5376

    
5377
  def CheckArguments(self):
5378
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5379
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
5380
      raise errors.OpPrereqError("The node is the master and the force"
5381
                                 " parameter was not set",
5382
                                 errors.ECODE_INVAL)
5383

    
5384
  def ExpandNames(self):
5385
    """Locking for PowercycleNode.
5386

5387
    This is a last-resort option and shouldn't block on other
5388
    jobs. Therefore, we grab no locks.
5389

5390
    """
5391
    self.needed_locks = {}
5392

    
5393
  def Exec(self, feedback_fn):
5394
    """Reboots a node.
5395

5396
    """
5397
    result = self.rpc.call_node_powercycle(self.op.node_name,
5398
                                           self.cfg.GetHypervisorType())
5399
    result.Raise("Failed to schedule the reboot")
5400
    return result.payload
5401

    
5402

    
5403
class LUClusterQuery(NoHooksLU):
5404
  """Query cluster configuration.
5405

5406
  """
5407
  REQ_BGL = False
5408

    
5409
  def ExpandNames(self):
5410
    self.needed_locks = {}
5411

    
5412
  def Exec(self, feedback_fn):
5413
    """Return cluster config.
5414

5415
    """
5416
    cluster = self.cfg.GetClusterInfo()
5417
    os_hvp = {}
5418

    
5419
    # Filter just for enabled hypervisors
5420
    for os_name, hv_dict in cluster.os_hvp.items():
5421
      os_hvp[os_name] = {}
5422
      for hv_name, hv_params in hv_dict.items():
5423
        if hv_name in cluster.enabled_hypervisors:
5424
          os_hvp[os_name][hv_name] = hv_params
5425

    
5426
    # Convert ip_family to ip_version
5427
    primary_ip_version = constants.IP4_VERSION
5428
    if cluster.primary_ip_family == netutils.IP6Address.family:
5429
      primary_ip_version = constants.IP6_VERSION
5430

    
5431
    result = {
5432
      "software_version": constants.RELEASE_VERSION,
5433
      "protocol_version": constants.PROTOCOL_VERSION,
5434
      "config_version": constants.CONFIG_VERSION,
5435
      "os_api_version": max(constants.OS_API_VERSIONS),
5436
      "export_version": constants.EXPORT_VERSION,
5437
      "architecture": (platform.architecture()[0], platform.machine()),
5438
      "name": cluster.cluster_name,
5439
      "master": cluster.master_node,
5440
      "default_hypervisor": cluster.enabled_hypervisors[0],
5441
      "enabled_hypervisors": cluster.enabled_hypervisors,
5442
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
5443
                        for hypervisor_name in cluster.enabled_hypervisors]),
5444
      "os_hvp": os_hvp,
5445
      "beparams": cluster.beparams,
5446
      "osparams": cluster.osparams,
5447
      "nicparams": cluster.nicparams,
5448
      "ndparams": cluster.ndparams,
5449
      "candidate_pool_size": cluster.candidate_pool_size,
5450
      "master_netdev": cluster.master_netdev,
5451
      "volume_group_name": cluster.volume_group_name,
5452
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
5453
      "file_storage_dir": cluster.file_storage_dir,
5454
      "shared_file_storage_dir": cluster.shared_file_storage_dir,
5455
      "maintain_node_health": cluster.maintain_node_health,
5456
      "ctime": cluster.ctime,
5457
      "mtime": cluster.mtime,
5458
      "uuid": cluster.uuid,
5459
      "tags": list(cluster.GetTags()),
5460
      "uid_pool": cluster.uid_pool,
5461
      "default_iallocator": cluster.default_iallocator,
5462
      "reserved_lvs": cluster.reserved_lvs,
5463
      "primary_ip_version": primary_ip_version,
5464
      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
5465
      "hidden_os": cluster.hidden_os,
5466
      "blacklisted_os": cluster.blacklisted_os,
5467
      }
5468

    
5469
    return result
5470

    
5471

    
5472
class LUClusterConfigQuery(NoHooksLU):
5473
  """Return configuration values.
5474

5475
  """
5476
  REQ_BGL = False
5477
  _FIELDS_DYNAMIC = utils.FieldSet()
5478
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
5479
                                  "watcher_pause", "volume_group_name")
5480

    
5481
  def CheckArguments(self):
5482
    _CheckOutputFields(static=self._FIELDS_STATIC,
5483
                       dynamic=self._FIELDS_DYNAMIC,
5484
                       selected=self.op.output_fields)
5485

    
5486
  def ExpandNames(self):
5487
    self.needed_locks = {}
5488

    
5489
  def Exec(self, feedback_fn):
5490
    """Dump a representation of the cluster config to the standard output.
5491

5492
    """
5493
    values = []
5494
    for field in self.op.output_fields:
5495
      if field == "cluster_name":
5496
        entry = self.cfg.GetClusterName()
5497
      elif field == "master_node":
5498
        entry = self.cfg.GetMasterNode()
5499
      elif field == "drain_flag":
5500
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
5501
      elif field == "watcher_pause":
5502
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
5503
      elif field == "volume_group_name":
5504
        entry = self.cfg.GetVGName()
5505
      else:
5506
        raise errors.ParameterError(field)
5507
      values.append(entry)
5508
    return values
5509

    
5510

    
5511
class LUInstanceActivateDisks(NoHooksLU):
5512
  """Bring up an instance's disks.
5513

5514
  """
5515
  REQ_BGL = False
5516

    
5517
  def ExpandNames(self):
5518
    self._ExpandAndLockInstance()
5519
    self.needed_locks[locking.LEVEL_NODE] = []
5520
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5521

    
5522
  def DeclareLocks(self, level):
5523
    if level == locking.LEVEL_NODE:
5524
      self._LockInstancesNodes()
5525

    
5526
  def CheckPrereq(self):
5527
    """Check prerequisites.
5528

5529
    This checks that the instance is in the cluster.
5530

5531
    """
5532
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5533
    assert self.instance is not None, \
5534
      "Cannot retrieve locked instance %s" % self.op.instance_name
5535
    _CheckNodeOnline(self, self.instance.primary_node)
5536

    
5537
  def Exec(self, feedback_fn):
5538
    """Activate the disks.
5539

5540
    """
5541
    disks_ok, disks_info = \
5542
              _AssembleInstanceDisks(self, self.instance,
5543
                                     ignore_size=self.op.ignore_size)
5544
    if not disks_ok:
5545
      raise errors.OpExecError("Cannot activate block devices")
5546

    
5547
    return disks_info
5548

    
5549

    
5550
def _AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type disks: list of L{objects.Disk} or None
  @param disks: which disks to assemble (or all, if None)
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  disks = _ExpandCheckDisks(instance, disks)

  # With the two-pass mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for idx, inst_disk in enumerate(disks):
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for idx, inst_disk in enumerate(disks):
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


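# Example sketch of how a caller might use _AssembleInstanceDisks to bring up
# a single disk while ignoring the recorded size (the calling context is
# hypothetical, not part of the original module):
#
#   disks_ok, device_info = _AssembleInstanceDisks(self, instance,
#                                                  disks=[instance.disks[0]],
#                                                  ignore_size=True)
#   if not disks_ok:
#     raise errors.OpExecError("Cannot activate block devices")
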
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUInstanceDeactivateDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    if self.op.force:
      _ShutdownInstanceDisks(self, instance)
    else:
      _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance, disks=None):
  """Shutdown block devices of an instance.

  This function checks that the instance is not running before calling
  _ShutdownInstanceDisks.

  """
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
  _ShutdownInstanceDisks(lu, instance, disks=disks)


def _ExpandCheckDisks(instance, disks):
  """Return the instance disks selected by the disks list

  @type disks: list of L{objects.Disk} or None
  @param disks: selected disks
  @rtype: list of L{objects.Disk}
  @return: selected instance disks to act on

  """
  if disks is None:
    return instance.disks
  else:
    if not set(disks).issubset(instance.disks):
      raise errors.ProgrammerError("Can only act on disks belonging to the"
                                   " target instance")
    return disks


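# Usage sketch for _ExpandCheckDisks (illustrative only): passing None selects
# every disk, while an explicit subset must consist of the instance's own
# L{objects.Disk} objects, e.g.:
#
#   all_disks = _ExpandCheckDisks(instance, None)
#   first_only = _ExpandCheckDisks(instance, instance.disks[:1])
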
def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is false, errors on the primary node are not
  ignored and make the function return failure; if it is true, such
  errors are only logged.

  """
  all_result = True
  disks = _ExpandCheckDisks(instance, disks)

  for disk in disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if ((node == instance.primary_node and not ignore_primary) or
            (node != instance.primary_node and not result.offline)):
          all_result = False
  return all_result


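# Illustrative sketch: shutting down only a subset of disks while tolerating
# failures on the primary node (argument values are hypothetical):
#
#   if not _ShutdownInstanceDisks(lu, instance, disks=instance.disks[1:],
#                                 ignore_primary=True):
#     lu.LogWarning("Some block devices could not be shut down")
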
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks whether a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name)
  nodeinfo[node].Raise("Can't get data from node %s" % node,
                       prereq=True, ecode=errors.ECODE_ENVIRON)
  free_mem = nodeinfo[node].payload.get("memory_free", None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem),
                               errors.ECODE_NORES)


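# Example sketch (hypothetical values): checking for 512 MiB of free memory on
# an instance's primary node before starting it:
#
#   _CheckNodeFreeMemory(self, instance.primary_node,
#                        "starting instance %s" % instance.name,
#                        512, instance.hypervisor)
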
def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
  """Checks if nodes have enough free disk space in all the given VGs.

  This function checks whether all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type req_sizes: C{dict}
  @param req_sizes: the hash of vg and corresponding amount of disk in
      MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  for vg, req_size in req_sizes.items():
    _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)


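# The req_sizes mapping is keyed by volume group name with MiB values, e.g.
# (hypothetical names and figures):
#
#   _CheckNodesFreeDiskPerVG(self, ["node1", "node2"],
#                            {"xenvg": 10240, "othervg": 2048})
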
def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
  """Checks if nodes have enough free disk space in the specified VG.

  This function checks whether all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type vg: C{str}
  @param vg: the volume group to check
  @type requested: C{int}
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, vg, None)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    vg_free = info.payload.get("vg_free", None)
    if not isinstance(vg_free, int):
      raise errors.OpPrereqError("Can't compute free disk space on node"
                                 " %s for vg %s, result was '%s'" %
                                 (node, vg, vg_free), errors.ECODE_ENVIRON)
    if requested > vg_free:
      raise errors.OpPrereqError("Not enough disk space on target node %s"
                                 " vg %s: required %d MiB, available %d MiB" %
                                 (node, vg, requested, vg_free),
                                 errors.ECODE_NORES)


def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, None, hypervisor_name)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    num_cpus = info.payload.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node, num_cpus, requested),
                                 errors.ECODE_NORES)


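# Example sketch (hypothetical values): require at least 4 physical CPUs on
# each candidate node; the hypervisor constant shown is only an illustrative
# choice:
#
#   _CheckNodesPhysicalCPUs(self, nodenames, 4, constants.HT_XEN_PVM)
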
class LUInstanceStartup(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    # extra beparams
    if self.op.beparams:
      # fill the beparams dict
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }

    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra hvparams
    if self.op.hvparams:
      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
      filled_hvp = cluster.FillHV(instance)
      filled_hvp.update(self.op.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)

    self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.proc.LogWarning("Ignoring offline primary node")

      if self.op.hvparams or self.op.beparams:
        self.proc.LogWarning("Overridden parameters are ignored")
    else:
      _CheckNodeOnline(self, instance.primary_node)

      bep = self.cfg.GetClusterInfo().FillBE(instance)

      # check bridges existence
      _CheckInstanceBridgesExist(self, instance)

      remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                instance.name,
                                                instance.hypervisor)
      remote_info.Raise("Error checking node %s" % instance.primary_node,
                        prereq=True, ecode=errors.ECODE_ENVIRON)
      if not remote_info.payload: # not running already
        _CheckNodeFreeMemory(self, instance.primary_node,
                             "starting instance %s" % instance.name,
                             bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    if not self.op.no_remember:
      self.cfg.MarkInstanceUp(instance.name)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.proc.LogInfo("Primary node offline, marked instance as started")
    else:
      node_current = instance.primary_node

      _StartInstanceDisks(self, instance, force)

      result = self.rpc.call_instance_start(node_current, instance,
                                            self.op.hvparams, self.op.beparams,
                                            self.op.startup_paused)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance: %s" % msg)


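# Illustrative note: the hvparams/beparams accepted by LUInstanceStartup are
# one-off overrides for this start only; they are validated in CheckPrereq and
# passed straight to call_instance_start without being written back to the
# cluster configuration.
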
class LUInstanceReboot(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }

    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node)
    instance_running = bool(remote_info.payload)

    node_current = instance.primary_node

    if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                                            constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type,
                                             self.op.shutdown_timeout)
      result.Raise("Could not reboot instance")
    else:
      if instance_running:
        result = self.rpc.call_instance_shutdown(node_current, instance,
                                                 self.op.shutdown_timeout)
        result.Raise("Could not shutdown instance for full reboot")
        _ShutdownInstanceDisks(self, instance)
      else:
        self.LogInfo("Instance %s was already stopped, starting now",
                     instance.name)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance,
                                            None, None, False)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


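# Illustrative summary of the reboot paths implemented above: soft and hard
# reboots of a running instance go through call_instance_reboot on the primary
# node, while any other case (a full reboot, or an instance that is not
# running) falls back to an explicit shutdown/start cycle with the disks
# deactivated and reactivated in between.
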
class LUInstanceShutdown(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["TIMEOUT"] = self.op.timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    self.primary_offline = \
      self.cfg.GetNodeInfo(self.instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.proc.LogWarning("Ignoring offline primary node")
    else:
      _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    timeout = self.op.timeout

    if not self.op.no_remember:
      self.cfg.MarkInstanceDown(instance.name)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.proc.LogInfo("Primary node offline, marked instance as stopped")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
      msg = result.fail_msg
      if msg:
        self.proc.LogWarning("Could not shutdown instance: %s" % msg)

      _ShutdownInstanceDisks(self, instance)


class LUInstanceReinstall(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    return _BuildInstanceHookEnvByObject(self, self.instance)

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
                     " offline, cannot reinstall")
    for node in instance.secondary_nodes:
      _CheckNodeOnline(self, node, "Instance secondary node offline,"
                       " cannot reinstall")

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name,
                                 errors.ECODE_INVAL)
    _CheckInstanceDown(self, instance, "cannot reinstall")

    if self.op.os_type is not None:
      # OS verification
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
      instance_os = self.op.os_type
    else:
      instance_os = instance.os

    nodelist = list(instance.all_nodes)

    if self.op.osparams:
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = None

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      # Write to configuration
      self.cfg.Update(inst, feedback_fn)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      # FIXME: pass debug option from opcode to backend
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
                                             self.op.debug_level,
                                             osparams=self.os_inst)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LUInstanceRecreateDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    # normalise the disk list
    self.op.disks = sorted(frozenset(self.op.disks))

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    if self.op.nodes:
      self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = []

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # if we replace the nodes, we only need to lock the old primary,
      # otherwise we need to lock all nodes for disk re-creation
      primary_only = bool(self.op.nodes)
      self._LockInstancesNodes(primary_only=primary_only)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    return _BuildInstanceHookEnvByObject(self, self.instance)

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    if self.op.nodes:
      if len(self.op.nodes) != len(instance.all_nodes):
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                   " %d replacement nodes were specified" %
                                   (instance.name, len(instance.all_nodes),
                                    len(self.op.nodes)),
                                   errors.ECODE_INVAL)
      assert instance.disk_template != constants.DT_DRBD8 or \
          len(self.op.nodes) == 2
      assert instance.disk_template != constants.DT_PLAIN or \
          len(self.op.nodes) == 1
      primary_node = self.op.nodes[0]
    else:
      primary_node = instance.primary_node
    _CheckNodeOnline(self, primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name, errors.ECODE_INVAL)
    # if we replace nodes *and* the old primary is offline, we don't
    # check
    assert instance.primary_node in self.needed_locks[locking.LEVEL_NODE]
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
    if not (self.op.nodes and old_pnode.offline):
      _CheckInstanceDown(self, instance, "cannot recreate disks")

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))
    else:
      for idx in self.op.disks:
        if idx >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index '%s'" % idx,
                                     errors.ECODE_INVAL)
    if self.op.disks != range(len(instance.disks)) and self.op.nodes:
      raise errors.OpPrereqError("Can't recreate disks partially and"
                                 " change the nodes at the same time",
                                 errors.ECODE_INVAL)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    instance = self.instance

    to_skip = []
    mods = [] # keeps track of needed logical_id changes

    for idx, disk in enumerate(instance.disks):
      if idx not in self.op.disks: # disk idx has not been passed in
        to_skip.append(idx)
        continue
      # update secondaries for disks, if needed
      if self.op.nodes:
        if disk.dev_type == constants.LD_DRBD8:
          # need to update the nodes and minors
          assert len(self.op.nodes) == 2
          assert len(disk.logical_id) == 6 # otherwise disk internals
                                           # have changed
          (_, _, old_port, _, _, old_secret) = disk.logical_id
          new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
          new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
                    new_minors[0], new_minors[1], old_secret)
          assert len(disk.logical_id) == len(new_id)
          mods.append((idx, new_id))

    # now that we have passed all asserts above, we can apply the mods
    # in a single run (to avoid partial changes)
    for idx, new_id in mods:
      instance.disks[idx].logical_id = new_id

    # change primary node, if needed
    if self.op.nodes:
      instance.primary_node = self.op.nodes[0]
      self.LogWarning("Changing the instance's nodes, you will have to"
                      " remove any disks left on the older nodes manually")

    if self.op.nodes:
      self.cfg.Update(instance, feedback_fn)

    _CreateDisks(self, instance, to_skip=to_skip)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    _CheckNodeOnline(self, instance.primary_node)
    _CheckInstanceDown(self, instance, "cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = netutils.GetHostname(name=new_name)
      if hostname != new_name:
        self.LogInfo("Resolved given name '%s' to '%s'", new_name,
                     hostname.name)
      if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
        raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                    " same as given hostname '%s'") %
                                    (hostname.name, self.op.new_name),
                                    errors.ECODE_INVAL)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  if not _RemoveDisks(lu, instance):
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance %s out of cluster config", instance.name)

  lu.cfg.RemoveInstance(instance.name)

  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
    "Instance lock removal conflict"

  # Remove lock for the instance
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                             self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


class LUInstanceFailover(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.iallocator = getattr(self.op, "iallocator", None)
    self.target_node = getattr(self.op, "target_node", None)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.target_node is not None:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    ignore_consistency = self.op.ignore_consistency
    shutdown_timeout = self.op.shutdown_timeout
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       cleanup=False,
                                       failover=True,
                                       ignore_consistency=ignore_consistency,
                                       shutdown_timeout=shutdown_timeout)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
      if instance.disk_template in constants.DTS_EXT_MIRROR:
        if self.op.target_node is None:
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        else:
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                   self.op.target_node]
        del self.recalculate_locks[locking.LEVEL_NODE]
      else:
        self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = self.op.target_node
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      "OLD_PRIMARY": source_node,
      "NEW_PRIMARY": target_node,
      }

    if instance.disk_template in constants.DTS_INT_MIRROR:
      env["OLD_SECONDARY"] = instance.secondary_nodes[0]
      env["NEW_SECONDARY"] = source_node
    else:
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""

    env.update(_BuildInstanceHookEnvByObject(self, instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self._migrater.instance
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    return (nl, nl + [instance.primary_node])


class LUInstanceMigrate(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting the instance down, as opposed to
  failover, which is done with a shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.target_node is not None:
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       cleanup=self.op.cleanup,
                                       failover=False,
                                       fallback=self.op.allow_failover)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      instance = self.context.cfg.GetInstanceInfo(self.op.instance_name)
      if instance.disk_template in constants.DTS_EXT_MIRROR:
        if self.op.target_node is None:
          self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        else:
          self.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
                                                   self.op.target_node]
        del self.recalculate_locks[locking.LEVEL_NODE]
      else:
        self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = self.op.target_node
    env = _BuildInstanceHookEnvByObject(self, instance)
    env.update({
      "MIGRATE_LIVE": self._migrater.live,
      "MIGRATE_CLEANUP": self.op.cleanup,
      "OLD_PRIMARY": source_node,
      "NEW_PRIMARY": target_node,
      })

    if instance.disk_template in constants.DTS_INT_MIRROR:
      env["OLD_SECONDARY"] = target_node
      env["NEW_SECONDARY"] = source_node
    else:
      env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = None

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self._migrater.instance
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    return (nl, nl + [instance.primary_node])


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    _CheckNodeVmCapable(self, target_node)

    if instance.admin_up:
      # check memory requirements on the target node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, disk,
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, disk,
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node, instance,
                                            None, None, False)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUNodeMigrate(LogicalUnit):
  """Migrate all instances from a node.

  """
  HPATH = "node-migrate"
  HTYPE = constants.HTYPE_NODE
  REQ_BGL = False

  def CheckArguments(self):
    pass

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    return {
      "NODE_NAME": self.op.node_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    return (nl, nl)

  def CheckPrereq(self):
    pass

  def Exec(self, feedback_fn):
    # Prepare jobs for migrating the node's primary instances
    jobs = [
      [opcodes.OpInstanceMigrate(instance_name=inst.name,
                                 mode=self.op.mode,
                                 live=self.op.live,
                                 iallocator=self.op.iallocator,
                                 target_node=self.op.target_node)]
      for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name)
      ]

    # TODO: Run iallocator in this opcode and pass correct placement options to
    # OpInstanceMigrate. Since other jobs can modify the cluster between
    # running the iallocator and the actual migration, a good consistency model
    # will have to be found.

    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
            frozenset([self.op.node_name]))

    return ResultWithJobs(jobs)


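# Illustrative note: LUNodeMigrate.Exec builds one single-opcode job per
# primary instance of the evacuated node, so each migration is submitted and
# tracked independently rather than as one monolithic job.
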
class TLMigrateInstance(Tasklet):
  """Tasklet class for instance migration.

  @type live: boolean
  @ivar live: whether the migration will be done live or non-live;
      this variable is initialized only after CheckPrereq has run
  @type cleanup: boolean
  @ivar cleanup: Whether we are cleaning up after a failed migration
  @type iallocator: string
  @ivar iallocator: The iallocator used to determine target_node
  @type target_node: string
  @ivar target_node: If given, the target_node to reallocate the instance to
  @type failover: boolean
  @ivar failover: Whether operation results in failover or migration
  @type fallback: boolean
  @ivar fallback: Whether fallback to failover is allowed if migration is not
                  possible
  @type ignore_consistency: boolean
  @ivar ignore_consistency: Whether we should ignore consistency between
                            source and target node
  @type shutdown_timeout: int
  @ivar shutdown_timeout: In case of failover, the timeout for the shutdown

  """

  # Constants
7030
  _MIGRATION_POLL_INTERVAL = 1      # seconds
7031
  _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
7032

    
7033
  def __init__(self, lu, instance_name, cleanup=False,
7034
               failover=False, fallback=False,
7035
               ignore_consistency=False,
7036
               shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
7037
    """Initializes this class.
7038

7039
    """
7040
    Tasklet.__init__(self, lu)
7041

    
7042
    # Parameters
7043
    self.instance_name = instance_name
7044
    self.cleanup = cleanup
7045
    self.live = False # will be overridden later
7046
    self.failover = failover
7047
    self.fallback = fallback
7048
    self.ignore_consistency = ignore_consistency
7049
    self.shutdown_timeout = shutdown_timeout
7050

    
7051
  def CheckPrereq(self):
7052
    """Check prerequisites.
7053

7054
    This checks that the instance is in the cluster.
7055

7056
    """
7057
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
7058
    instance = self.cfg.GetInstanceInfo(instance_name)
7059
    assert instance is not None
7060
    self.instance = instance
7061

    
7062
    if (not self.cleanup and not instance.admin_up and not self.failover and
7063
        self.fallback):
7064
      self.lu.LogInfo("Instance is marked down, fallback allowed, switching"
7065
                      " to failover")
7066
      self.failover = True
7067

    
7068
    if instance.disk_template not in constants.DTS_MIRRORED:
7069
      if self.failover:
7070
        text = "failovers"
7071
      else:
7072
        text = "migrations"
7073
      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
7074
                                 " %s" % (instance.disk_template, text),
7075
                                 errors.ECODE_STATE)
7076

    
7077
    if instance.disk_template in constants.DTS_EXT_MIRROR:
7078
      _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
7079

    
7080
      if self.lu.op.iallocator:
7081
        self._RunAllocator()
7082
      else:
7083
        # We set self.target_node as it is required by
        # BuildHooksEnv
7085
        self.target_node = self.lu.op.target_node
7086

    
7087
      # self.target_node is already populated, either directly or by the
7088
      # iallocator run
7089
      target_node = self.target_node
7090
      if self.target_node == instance.primary_node:
7091
        raise errors.OpPrereqError("Cannot migrate instance %s"
7092
                                   " to its primary (%s)" %
7093
                                   (instance.name, instance.primary_node))
7094

    
7095
      if len(self.lu.tasklets) == 1:
7096
        # It is safe to release locks only when we're the only tasklet
7097
        # in the LU
7098
        _ReleaseLocks(self.lu, locking.LEVEL_NODE,
7099
                      keep=[instance.primary_node, self.target_node])
7100

    
7101
    else:
7102
      secondary_nodes = instance.secondary_nodes
7103
      if not secondary_nodes:
7104
        raise errors.ConfigurationError("No secondary node but using"
7105
                                        " %s disk template" %
7106
                                        instance.disk_template)
7107
      target_node = secondary_nodes[0]
7108
      if self.lu.op.iallocator or (self.lu.op.target_node and
7109
                                   self.lu.op.target_node != target_node):
7110
        if self.failover:
7111
          text = "failed over"
7112
        else:
7113
          text = "migrated"
7114
        raise errors.OpPrereqError("Instances with disk template %s cannot"
7115
                                   " be %s to arbitrary nodes"
7116
                                   " (neither an iallocator nor a target"
7117
                                   " node can be passed)" %
7118
                                   (instance.disk_template, text),
7119
                                   errors.ECODE_INVAL)
7120

    
7121
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
7122

    
7123
    # check memory requirements on the secondary node
7124
    if not self.failover or instance.admin_up:
7125
      _CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
7126
                           instance.name, i_be[constants.BE_MEMORY],
7127
                           instance.hypervisor)
7128
    else:
7129
      self.lu.LogInfo("Not checking memory on the secondary node as"
7130
                      " instance will not be started")
7131

    
7132
    # check bridge existence
    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
7134

    
7135
    if not self.cleanup:
7136
      _CheckNodeNotDrained(self.lu, target_node)
7137
      if not self.failover:
7138
        result = self.rpc.call_instance_migratable(instance.primary_node,
7139
                                                   instance)
7140
        if result.fail_msg and self.fallback:
7141
          self.lu.LogInfo("Can't migrate, instance offline, fallback to"
7142
                          " failover")
7143
          self.failover = True
7144
        else:
7145
          result.Raise("Can't migrate, please use failover",
7146
                       prereq=True, ecode=errors.ECODE_STATE)
7147

    
7148
    assert not (self.failover and self.cleanup)
7149

    
7150
    if not self.failover:
7151
      if self.lu.op.live is not None and self.lu.op.mode is not None:
7152
        raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
7153
                                   " parameters are accepted",
7154
                                   errors.ECODE_INVAL)
7155
      if self.lu.op.live is not None:
7156
        if self.lu.op.live:
7157
          self.lu.op.mode = constants.HT_MIGRATION_LIVE
7158
        else:
7159
          self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
7160
        # reset the 'live' parameter to None so that repeated
7161
        # invocations of CheckPrereq do not raise an exception
7162
        self.lu.op.live = None
7163
      elif self.lu.op.mode is None:
7164
        # read the default value from the hypervisor
7165
        i_hv = self.cfg.GetClusterInfo().FillHV(self.instance,
7166
                                                skip_globals=False)
7167
        self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
7168

    
7169
      self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
7170
    else:
7171
      # Failover is never live
7172
      self.live = False
7173

    
7174
  def _RunAllocator(self):
7175
    """Run the allocator based on input opcode.
7176

7177
    """
7178
    ial = IAllocator(self.cfg, self.rpc,
7179
                     mode=constants.IALLOCATOR_MODE_RELOC,
7180
                     name=self.instance_name,
7181
                     # TODO See why hail breaks with a single node below
7182
                     relocate_from=[self.instance.primary_node,
7183
                                    self.instance.primary_node],
7184
                     )
7185

    
7186
    ial.Run(self.lu.op.iallocator)
7187

    
7188
    if not ial.success:
7189
      raise errors.OpPrereqError("Can't compute nodes using"
7190
                                 " iallocator '%s': %s" %
7191
                                 (self.lu.op.iallocator, ial.info),
7192
                                 errors.ECODE_NORES)
7193
    if len(ial.result) != ial.required_nodes:
7194
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7195
                                 " of nodes (%s), required %s" %
7196
                                 (self.lu.op.iallocator, len(ial.result),
7197
                                  ial.required_nodes), errors.ECODE_FAULT)
7198
    self.target_node = ial.result[0]
7199
    self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
7200
                 self.instance_name, self.lu.op.iallocator,
7201
                 utils.CommaJoin(ial.result))
7202

    
7203
  def _WaitUntilSync(self):
7204
    """Poll with custom rpc for disk sync.
7205

7206
    This uses our own step-based rpc call.
7207

7208
    """
7209
    self.feedback_fn("* wait until resync is done")
7210
    all_done = False
7211
    while not all_done:
7212
      all_done = True
7213
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
7214
                                            self.nodes_ip,
7215
                                            self.instance.disks)
7216
      min_percent = 100
7217
      for node, nres in result.items():
7218
        nres.Raise("Cannot resync disks on node %s" % node)
7219
        node_done, node_percent = nres.payload
7220
        all_done = all_done and node_done
7221
        if node_percent is not None:
7222
          min_percent = min(min_percent, node_percent)
7223
      if not all_done:
7224
        if min_percent < 100:
7225
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
7226
        time.sleep(2)
7227

    
7228
  def _EnsureSecondary(self, node):
7229
    """Demote a node to secondary.
7230

7231
    """
7232
    self.feedback_fn("* switching node %s to secondary mode" % node)
7233

    
7234
    for dev in self.instance.disks:
7235
      self.cfg.SetDiskID(dev, node)
7236

    
7237
    result = self.rpc.call_blockdev_close(node, self.instance.name,
7238
                                          self.instance.disks)
7239
    result.Raise("Cannot change disk to secondary on node %s" % node)
7240

    
7241
  def _GoStandalone(self):
7242
    """Disconnect from the network.
7243

7244
    """
7245
    self.feedback_fn("* changing into standalone mode")
7246
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
7247
                                               self.instance.disks)
7248
    for node, nres in result.items():
7249
      nres.Raise("Cannot disconnect disks node %s" % node)
7250

    
7251
  def _GoReconnect(self, multimaster):
7252
    """Reconnect to the network.
7253

7254
    """
7255
    if multimaster:
7256
      msg = "dual-master"
7257
    else:
7258
      msg = "single-master"
7259
    self.feedback_fn("* changing disks into %s mode" % msg)
7260
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
7261
                                           self.instance.disks,
7262
                                           self.instance.name, multimaster)
7263
    for node, nres in result.items():
7264
      nres.Raise("Cannot change disks config on node %s" % node)
7265

    
7266
  def _ExecCleanup(self):
7267
    """Try to cleanup after a failed migration.
7268

7269
    The cleanup is done by:
7270
      - check that the instance is running only on one node
7271
        (and update the config if needed)
7272
      - change disks on its secondary node to secondary
7273
      - wait until disks are fully synchronized
7274
      - disconnect from the network
7275
      - change disks into single-master mode
7276
      - wait again until disks are fully synchronized
7277

7278
    """
7279
    instance = self.instance
7280
    target_node = self.target_node
7281
    source_node = self.source_node
7282

    
7283
    # check running on only one node
7284
    self.feedback_fn("* checking where the instance actually runs"
7285
                     " (if this hangs, the hypervisor might be in"
7286
                     " a bad state)")
7287
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
7288
    for node, result in ins_l.items():
7289
      result.Raise("Can't contact node %s" % node)
7290

    
7291
    runningon_source = instance.name in ins_l[source_node].payload
7292
    runningon_target = instance.name in ins_l[target_node].payload
7293

    
7294
    if runningon_source and runningon_target:
7295
      raise errors.OpExecError("Instance seems to be running on two nodes,"
7296
                               " or the hypervisor is confused; you will have"
7297
                               " to ensure manually that it runs only on one"
7298
                               " and restart this operation")
7299

    
7300
    if not (runningon_source or runningon_target):
7301
      raise errors.OpExecError("Instance does not seem to be running at all;"
7302
                               " in this case it's safer to repair by"
7303
                               " running 'gnt-instance stop' to ensure disk"
7304
                               " shutdown, and then restarting it")
7305

    
7306
    if runningon_target:
7307
      # the migration has actually succeeded, we need to update the config
7308
      self.feedback_fn("* instance running on secondary node (%s),"
7309
                       " updating config" % target_node)
7310
      instance.primary_node = target_node
7311
      self.cfg.Update(instance, self.feedback_fn)
7312
      demoted_node = source_node
7313
    else:
7314
      self.feedback_fn("* instance confirmed to be running on its"
7315
                       " primary node (%s)" % source_node)
7316
      demoted_node = target_node
7317

    
7318
    if instance.disk_template in constants.DTS_INT_MIRROR:
7319
      self._EnsureSecondary(demoted_node)
7320
      try:
7321
        self._WaitUntilSync()
7322
      except errors.OpExecError:
7323
        # we ignore here errors, since if the device is standalone, it
7324
        # won't be able to sync
7325
        pass
7326
      self._GoStandalone()
7327
      self._GoReconnect(False)
7328
      self._WaitUntilSync()
7329

    
7330
    self.feedback_fn("* done")
7331

    
7332
  def _RevertDiskStatus(self):
7333
    """Try to revert the disk status after a failed migration.
7334

7335
    """
7336
    target_node = self.target_node
7337
    if self.instance.disk_template in constants.DTS_EXT_MIRROR:
7338
      return
7339

    
7340
    try:
7341
      self._EnsureSecondary(target_node)
7342
      self._GoStandalone()
7343
      self._GoReconnect(False)
7344
      self._WaitUntilSync()
7345
    except errors.OpExecError, err:
7346
      self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
7347
                         " please try to recover the instance manually;"
7348
                         " error '%s'" % str(err))
7349

    
7350
  def _AbortMigration(self):
7351
    """Call the hypervisor code to abort a started migration.
7352

7353
    """
7354
    instance = self.instance
7355
    target_node = self.target_node
7356
    source_node = self.source_node
7357
    migration_info = self.migration_info
7358

    
7359
    abort_result = self.rpc.call_instance_finalize_migration_dst(target_node,
7360
                                                                 instance,
7361
                                                                 migration_info,
7362
                                                                 False)
7363
    abort_msg = abort_result.fail_msg
7364
    if abort_msg:
7365
      logging.error("Aborting migration failed on target node %s: %s",
7366
                    target_node, abort_msg)
7367
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.
7369

    
7370
    abort_result = self.rpc.call_instance_finalize_migration_src(source_node,
7371
        instance, False, self.live)
7372
    abort_msg = abort_result.fail_msg
7373
    if abort_msg:
7374
      logging.error("Aborting migration failed on source node %s: %s",
7375
                    source_node, abort_msg)
7376

    
7377
  def _ExecMigration(self):
7378
    """Migrate an instance.
7379

7380
    The migration is done by:
7381
      - change the disks into dual-master mode
7382
      - wait until disks are fully synchronized again
7383
      - migrate the instance
7384
      - change disks on the new secondary node (the old primary) to secondary
7385
      - wait until disks are fully synchronized
7386
      - change disks into single-master mode
7387

7388
    """
7389
    instance = self.instance
7390
    target_node = self.target_node
7391
    source_node = self.source_node
7392

    
7393
    self.feedback_fn("* checking disk consistency between source and target")
7394
    for dev in instance.disks:
7395
      if not _CheckDiskConsistency(self.lu, dev, target_node, False):
7396
        raise errors.OpExecError("Disk %s is degraded or not fully"
7397
                                 " synchronized on target node,"
7398
                                 " aborting migration" % dev.iv_name)
7399

    
7400
    # First get the migration information from the remote node
7401
    result = self.rpc.call_migration_info(source_node, instance)
7402
    msg = result.fail_msg
7403
    if msg:
7404
      log_err = ("Failed fetching source migration information from %s: %s" %
7405
                 (source_node, msg))
7406
      logging.error(log_err)
7407
      raise errors.OpExecError(log_err)
7408

    
7409
    self.migration_info = migration_info = result.payload
7410

    
7411
    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
7412
      # Then switch the disks to master/master mode
7413
      self._EnsureSecondary(target_node)
7414
      self._GoStandalone()
7415
      self._GoReconnect(True)
7416
      self._WaitUntilSync()
7417

    
7418
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
7419
    result = self.rpc.call_accept_instance(target_node,
7420
                                           instance,
7421
                                           migration_info,
7422
                                           self.nodes_ip[target_node])
7423

    
7424
    msg = result.fail_msg
7425
    if msg:
7426
      logging.error("Instance pre-migration failed, trying to revert"
7427
                    " disk status: %s", msg)
7428
      self.feedback_fn("Pre-migration failed, aborting")
7429
      self._AbortMigration()
7430
      self._RevertDiskStatus()
7431
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
7432
                               (instance.name, msg))
7433

    
7434
    self.feedback_fn("* migrating instance to %s" % target_node)
7435
    result = self.rpc.call_instance_migrate(source_node, instance,
7436
                                            self.nodes_ip[target_node],
7437
                                            self.live)
7438
    msg = result.fail_msg
7439
    if msg:
7440
      logging.error("Instance migration failed, trying to revert"
7441
                    " disk status: %s", msg)
7442
      self.feedback_fn("Migration failed, aborting")
7443
      self._AbortMigration()
7444
      self._RevertDiskStatus()
7445
      raise errors.OpExecError("Could not migrate instance %s: %s" %
7446
                               (instance.name, msg))
7447

    
7448
    self.feedback_fn("* starting memory transfer")
7449
    last_feedback = time.time()
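    # Poll the hypervisor every _MIGRATION_POLL_INTERVAL seconds; progress is
    # reported at most once every _MIGRATION_FEEDBACK_INTERVAL seconds.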
7450
    while True:
7451
      result = self.rpc.call_instance_get_migration_status(source_node,
7452
                                                           instance)
7453
      msg = result.fail_msg
7454
      ms = result.payload   # MigrationStatus instance
7455
      if msg or (ms.status in constants.HV_MIGRATION_FAILED_STATUSES):
7456
        logging.error("Instance migration failed, trying to revert"
7457
                      " disk status: %s", msg)
7458
        self.feedback_fn("Migration failed, aborting")
7459
        self._AbortMigration()
7460
        self._RevertDiskStatus()
7461
        raise errors.OpExecError("Could not migrate instance %s: %s" %
7462
                                 (instance.name, msg))
7463

    
7464
      if result.payload.status != constants.HV_MIGRATION_ACTIVE:
7465
        self.feedback_fn("* memory transfer complete")
7466
        break
7467

    
7468
      if (utils.TimeoutExpired(last_feedback,
7469
                               self._MIGRATION_FEEDBACK_INTERVAL) and
7470
          ms.transferred_ram is not None):
7471
        mem_progress = 100 * float(ms.transferred_ram) / float(ms.total_ram)
7472
        self.feedback_fn("* memory transfer progress: %.2f %%" % mem_progress)
7473
        last_feedback = time.time()
7474

    
7475
      time.sleep(self._MIGRATION_POLL_INTERVAL)
7476

    
7477
    result = self.rpc.call_instance_finalize_migration_src(source_node,
7478
                                                           instance,
7479
                                                           True,
7480
                                                           self.live)
7481
    msg = result.fail_msg
7482
    if msg:
7483
      logging.error("Instance migration succeeded, but finalization failed"
7484
                    " on the source node: %s", msg)
7485
      raise errors.OpExecError("Could not finalize instance migration: %s" %
7486
                               msg)
7487

    
7488
    instance.primary_node = target_node
7489

    
7490
    # distribute new instance config to the other nodes
7491
    self.cfg.Update(instance, self.feedback_fn)
7492

    
7493
    result = self.rpc.call_instance_finalize_migration_dst(target_node,
7494
                                                           instance,
7495
                                                           migration_info,
7496
                                                           True)
7497
    msg = result.fail_msg
7498
    if msg:
7499
      logging.error("Instance migration succeeded, but finalization failed"
7500
                    " on the target node: %s", msg)
7501
      raise errors.OpExecError("Could not finalize instance migration: %s" %
7502
                               msg)
7503

    
7504
    if self.instance.disk_template not in constants.DTS_EXT_MIRROR:
7505
      self._EnsureSecondary(source_node)
7506
      self._WaitUntilSync()
7507
      self._GoStandalone()
7508
      self._GoReconnect(False)
7509
      self._WaitUntilSync()
7510

    
7511
    self.feedback_fn("* done")
7512

    
7513
  def _ExecFailover(self):
7514
    """Failover an instance.
7515

7516
    The failover is done by shutting it down on its present node and
7517
    starting it on the secondary.
7518

7519
    """
7520
    instance = self.instance
7521
    primary_node = self.cfg.GetNodeInfo(instance.primary_node)
7522

    
7523
    source_node = instance.primary_node
7524
    target_node = self.target_node
7525

    
7526
    if instance.admin_up:
7527
      self.feedback_fn("* checking disk consistency between source and target")
7528
      for dev in instance.disks:
7529
        # for drbd, these are drbd over lvm
7530
        if not _CheckDiskConsistency(self.lu, dev, target_node, False):
7531
          if primary_node.offline:
7532
            self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
7533
                             " target node %s" %
7534
                             (primary_node.name, dev.iv_name, target_node))
7535
          elif not self.ignore_consistency:
7536
            raise errors.OpExecError("Disk %s is degraded on target node,"
7537
                                     " aborting failover" % dev.iv_name)
7538
    else:
7539
      self.feedback_fn("* not checking disk consistency as instance is not"
7540
                       " running")
7541

    
7542
    self.feedback_fn("* shutting down instance on source node")
7543
    logging.info("Shutting down instance %s on node %s",
7544
                 instance.name, source_node)
7545

    
7546
    result = self.rpc.call_instance_shutdown(source_node, instance,
7547
                                             self.shutdown_timeout)
7548
    msg = result.fail_msg
7549
    if msg:
7550
      if self.ignore_consistency or primary_node.offline:
7551
        self.lu.LogWarning("Could not shutdown instance %s on node %s,"
7552
                           " proceeding anyway; please make sure node"
7553
                           " %s is down; error details: %s",
7554
                           instance.name, source_node, source_node, msg)
7555
      else:
7556
        raise errors.OpExecError("Could not shutdown instance %s on"
7557
                                 " node %s: %s" %
7558
                                 (instance.name, source_node, msg))
7559

    
7560
    self.feedback_fn("* deactivating the instance's disks on source node")
7561
    if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
7562
      raise errors.OpExecError("Can't shut down the instance's disks")
7563

    
7564
    instance.primary_node = target_node
7565
    # distribute new instance config to the other nodes
7566
    self.cfg.Update(instance, self.feedback_fn)
7567

    
7568
    # Only start the instance if it's marked as up
7569
    if instance.admin_up:
7570
      self.feedback_fn("* activating the instance's disks on target node %s" %
7571
                       target_node)
7572
      logging.info("Starting instance %s on node %s",
7573
                   instance.name, target_node)
7574

    
7575
      disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
7576
                                           ignore_secondaries=True)
7577
      if not disks_ok:
7578
        _ShutdownInstanceDisks(self.lu, instance)
7579
        raise errors.OpExecError("Can't activate the instance's disks")
7580

    
7581
      self.feedback_fn("* starting the instance on the target node %s" %
7582
                       target_node)
7583
      result = self.rpc.call_instance_start(target_node, instance, None, None,
7584
                                            False)
7585
      msg = result.fail_msg
7586
      if msg:
7587
        _ShutdownInstanceDisks(self.lu, instance)
7588
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
7589
                                 (instance.name, target_node, msg))
7590

    
7591
  def Exec(self, feedback_fn):
7592
    """Perform the migration.
7593

7594
    """
7595
    self.feedback_fn = feedback_fn
7596
    self.source_node = self.instance.primary_node
7597

    
7598
    # FIXME: if we implement migrate-to-any in DRBD, this needs fixing
7599
    if self.instance.disk_template in constants.DTS_INT_MIRROR:
7600
      self.target_node = self.instance.secondary_nodes[0]
7601
      # Otherwise self.target_node has been populated either
7602
      # directly, or through an iallocator.
7603

    
7604
    self.all_nodes = [self.source_node, self.target_node]
7605
    self.nodes_ip = dict((name, node.secondary_ip) for (name, node)
7606
                         in self.cfg.GetMultiNodeInfo(self.all_nodes))
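    # nodes_ip maps each node name to its secondary IP, which is the address
    # used by the DRBD and migration RPC calls made during the operation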
7607

    
7608
    if self.failover:
7609
      feedback_fn("Failover instance %s" % self.instance.name)
7610
      self._ExecFailover()
7611
    else:
7612
      feedback_fn("Migrating instance %s" % self.instance.name)
7613

    
7614
      if self.cleanup:
7615
        return self._ExecCleanup()
7616
      else:
7617
        return self._ExecMigration()
7618

    
7619

    
7620
def _CreateBlockDev(lu, node, instance, device, force_create,
7621
                    info, force_open):
7622
  """Create a tree of block devices on a given node.
7623

7624
  If this device type has to be created on secondaries, create it and
7625
  all its children.
7626

7627
  If not, just recurse to children keeping the same 'force' value.
7628

7629
  @param lu: the lu on whose behalf we execute
7630
  @param node: the node on which to create the device
7631
  @type instance: L{objects.Instance}
7632
  @param instance: the instance which owns the device
7633
  @type device: L{objects.Disk}
7634
  @param device: the device to create
7635
  @type force_create: boolean
7636
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has the
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
7646

7647
  """
7648
  if device.CreateOnSecondary():
7649
    force_create = True
7650

    
7651
  if device.children:
7652
    for child in device.children:
7653
      _CreateBlockDev(lu, node, instance, child, force_create,
7654
                      info, force_open)
7655

    
7656
  if not force_create:
7657
    return
7658

    
7659
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
7660

    
7661

    
7662
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
7663
  """Create a single block device on a given node.
7664

7665
  This will not recurse over children of the device, so they must be
7666
  created in advance.
7667

7668
  @param lu: the lu on whose behalf we execute
7669
  @param node: the node on which to create the device
7670
  @type instance: L{objects.Instance}
7671
  @param instance: the instance which owns the device
7672
  @type device: L{objects.Disk}
7673
  @param device: the device to create
7674
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
7681

7682
  """
7683
  lu.cfg.SetDiskID(device, node)
7684
  result = lu.rpc.call_blockdev_create(node, device, device.size,
7685
                                       instance.name, force_open, info)
7686
  result.Raise("Can't create block device %s on"
7687
               " node %s for instance %s" % (device, node, instance.name))
7688
  if device.physical_id is None:
7689
    device.physical_id = result.payload
7690

    
7691

    
7692
def _GenerateUniqueNames(lu, exts):
7693
  """Generate a suitable LV name.
7694

7695
  This will generate a logical volume name for the given instance.
7696

7697
  """
7698
  results = []
7699
  for val in exts:
7700
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
7701
    results.append("%s%s" % (new_id, val))
7702
  return results
7703

    
7704

    
7705
def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
7706
                         iv_name, p_minor, s_minor):
7707
  """Generate a drbd8 device complete with its children.
7708

7709
  """
7710
  assert len(vgnames) == len(names) == 2
7711
  port = lu.cfg.AllocatePort()
7712
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
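  # The DRBD8 device's logical_id is the tuple (primary, secondary, port,
  # p_minor, s_minor, shared_secret); the data LV and the 128 MB metadata LV
  # built below become its children.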
7713
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
7714
                          logical_id=(vgnames[0], names[0]))
7715
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
7716
                          logical_id=(vgnames[1], names[1]))
7717
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
7718
                          logical_id=(primary, secondary, port,
7719
                                      p_minor, s_minor,
7720
                                      shared_secret),
7721
                          children=[dev_data, dev_meta],
7722
                          iv_name=iv_name)
7723
  return drbd_dev
7724

    
7725

    
7726
def _GenerateDiskTemplate(lu, template_name,
7727
                          instance_name, primary_node,
7728
                          secondary_nodes, disk_info,
7729
                          file_storage_dir, file_driver,
7730
                          base_index, feedback_fn):
7731
  """Generate the entire disk layout for a given template type.
7732

7733
  """
7734
  #TODO: compute space requirements
7735

    
7736
  vgname = lu.cfg.GetVGName()
7737
  disk_count = len(disk_info)
7738
  disks = []
7739
  if template_name == constants.DT_DISKLESS:
7740
    pass
7741
  elif template_name == constants.DT_PLAIN:
7742
    if len(secondary_nodes) != 0:
7743
      raise errors.ProgrammerError("Wrong template configuration")
7744

    
7745
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
7746
                                      for i in range(disk_count)])
7747
    for idx, disk in enumerate(disk_info):
7748
      disk_index = idx + base_index
7749
      vg = disk.get(constants.IDISK_VG, vgname)
7750
      feedback_fn("* disk %i, vg %s, name %s" % (idx, vg, names[idx]))
7751
      disk_dev = objects.Disk(dev_type=constants.LD_LV,
7752
                              size=disk[constants.IDISK_SIZE],
7753
                              logical_id=(vg, names[idx]),
7754
                              iv_name="disk/%d" % disk_index,
7755
                              mode=disk[constants.IDISK_MODE])
7756
      disks.append(disk_dev)
7757
  elif template_name == constants.DT_DRBD8:
7758
    if len(secondary_nodes) != 1:
7759
      raise errors.ProgrammerError("Wrong template configuration")
7760
    remote_node = secondary_nodes[0]
7761
    minors = lu.cfg.AllocateDRBDMinor(
7762
      [primary_node, remote_node] * len(disk_info), instance_name)
7763

    
7764
    names = []
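    # each disk gets two logical volumes, named "<uuid>.disk<N>_data" for the
    # data and "<uuid>.disk<N>_meta" for the DRBD metadata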
7765
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
7766
                                               for i in range(disk_count)]):
7767
      names.append(lv_prefix + "_data")
7768
      names.append(lv_prefix + "_meta")
7769
    for idx, disk in enumerate(disk_info):
7770
      disk_index = idx + base_index
7771
      data_vg = disk.get(constants.IDISK_VG, vgname)
7772
      meta_vg = disk.get(constants.IDISK_METAVG, data_vg)
7773
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
7774
                                      disk[constants.IDISK_SIZE],
7775
                                      [data_vg, meta_vg],
7776
                                      names[idx * 2:idx * 2 + 2],
7777
                                      "disk/%d" % disk_index,
7778
                                      minors[idx * 2], minors[idx * 2 + 1])
7779
      disk_dev.mode = disk[constants.IDISK_MODE]
7780
      disks.append(disk_dev)
7781
  elif template_name == constants.DT_FILE:
7782
    if len(secondary_nodes) != 0:
7783
      raise errors.ProgrammerError("Wrong template configuration")
7784

    
7785
    opcodes.RequireFileStorage()
7786

    
7787
    for idx, disk in enumerate(disk_info):
7788
      disk_index = idx + base_index
7789
      disk_dev = objects.Disk(dev_type=constants.LD_FILE,
7790
                              size=disk[constants.IDISK_SIZE],
7791
                              iv_name="disk/%d" % disk_index,
7792
                              logical_id=(file_driver,
7793
                                          "%s/disk%d" % (file_storage_dir,
7794
                                                         disk_index)),
7795
                              mode=disk[constants.IDISK_MODE])
7796
      disks.append(disk_dev)
7797
  elif template_name == constants.DT_SHARED_FILE:
7798
    if len(secondary_nodes) != 0:
7799
      raise errors.ProgrammerError("Wrong template configuration")
7800

    
7801
    opcodes.RequireSharedFileStorage()
7802

    
7803
    for idx, disk in enumerate(disk_info):
7804
      disk_index = idx + base_index
7805
      disk_dev = objects.Disk(dev_type=constants.LD_FILE,
7806
                              size=disk[constants.IDISK_SIZE],
7807
                              iv_name="disk/%d" % disk_index,
7808
                              logical_id=(file_driver,
7809
                                          "%s/disk%d" % (file_storage_dir,
7810
                                                         disk_index)),
7811
                              mode=disk[constants.IDISK_MODE])
7812
      disks.append(disk_dev)
7813
  elif template_name == constants.DT_BLOCK:
7814
    if len(secondary_nodes) != 0:
7815
      raise errors.ProgrammerError("Wrong template configuration")
7816

    
7817
    for idx, disk in enumerate(disk_info):
7818
      disk_index = idx + base_index
7819
      disk_dev = objects.Disk(dev_type=constants.LD_BLOCKDEV,
7820
                              size=disk[constants.IDISK_SIZE],
7821
                              logical_id=(constants.BLOCKDEV_DRIVER_MANUAL,
7822
                                          disk[constants.IDISK_ADOPT]),
7823
                              iv_name="disk/%d" % disk_index,
7824
                              mode=disk[constants.IDISK_MODE])
7825
      disks.append(disk_dev)
7826

    
7827
  else:
7828
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
7829
  return disks
7830

    
7831

    
7832
def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name
7837

    
7838

    
7839
def _CalcEta(time_taken, written, total_size):
7840
  """Calculates the ETA based on size written and total size.
7841

7842
  @param time_taken: The time taken so far
7843
  @param written: amount written so far
7844
  @param total_size: The total size of data to be written
7845
  @return: The remaining time in seconds
7846

7847
  """
7848
  avg_time = time_taken / float(written)
7849
  return (total_size - written) * avg_time
7850

    
7851

    
7852
def _WipeDisks(lu, instance):
7853
  """Wipes instance disks.
7854

7855
  @type lu: L{LogicalUnit}
7856
  @param lu: the logical unit on whose behalf we execute
7857
  @type instance: L{objects.Instance}
7858
  @param instance: the instance whose disks we should create
7859
  @return: the success of the wipe
7860

7861
  """
7862
  node = instance.primary_node
7863

    
7864
  for device in instance.disks:
7865
    lu.cfg.SetDiskID(device, node)
7866

    
7867
  logging.info("Pause sync of instance %s disks", instance.name)
7868
  result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
7869

    
7870
  for idx, success in enumerate(result.payload):
7871
    if not success:
7872
      logging.warn("pause-sync of instance %s for disks %d failed",
7873
                   instance.name, idx)
7874

    
7875
  try:
7876
    for idx, device in enumerate(instance.disks):
7877
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
7878
      # MAX_WIPE_CHUNK at max
7879
      wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
7880
                            constants.MIN_WIPE_CHUNK_PERCENT)
7881
      # we _must_ make this an int, otherwise rounding errors will
7882
      # occur
7883
      wipe_chunk_size = int(wipe_chunk_size)
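      # Illustrative example, assuming MIN_WIPE_CHUNK_PERCENT = 10 and
      # MAX_WIPE_CHUNK = 1024 (see constants.py for the real values): a
      # 2048 MB disk would be wiped in 204 MB chunks.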
7884

    
7885
      lu.LogInfo("* Wiping disk %d", idx)
7886
      logging.info("Wiping disk %d for instance %s, node %s using"
7887
                   " chunk size %s", idx, instance.name, node, wipe_chunk_size)
7888

    
7889
      offset = 0
7890
      size = device.size
7891
      last_output = 0
7892
      start_time = time.time()
7893

    
7894
      while offset < size:
7895
        wipe_size = min(wipe_chunk_size, size - offset)
7896
        logging.debug("Wiping disk %d, offset %s, chunk %s",
7897
                      idx, offset, wipe_size)
7898
        result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
7899
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
7900
                     (idx, offset, wipe_size))
7901
        now = time.time()
7902
        offset += wipe_size
7903
        if now - last_output >= 60:
7904
          eta = _CalcEta(now - start_time, offset, size)
7905
          lu.LogInfo(" - done: %.1f%% ETA: %s" %
7906
                     (offset / float(size) * 100, utils.FormatSeconds(eta)))
7907
          last_output = now
7908
  finally:
7909
    logging.info("Resume sync of instance %s disks", instance.name)
7910

    
7911
    result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
7912

    
7913
    for idx, success in enumerate(result.payload):
7914
      if not success:
7915
        lu.LogWarning("Resume sync of disk %d failed, please have a"
7916
                      " look at the status and troubleshoot the issue", idx)
7917
        logging.warn("resume-sync of instance %s for disks %d failed",
7918
                     instance.name, idx)
7919

    
7920

    
7921
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
7922
  """Create all disks for an instance.
7923

7924
  This abstracts away some work from AddInstance.
7925

7926
  @type lu: L{LogicalUnit}
7927
  @param lu: the logical unit on whose behalf we execute
7928
  @type instance: L{objects.Instance}
7929
  @param instance: the instance whose disks we should create
7930
  @type to_skip: list
7931
  @param to_skip: list of indices to skip
7932
  @type target_node: string
7933
  @param target_node: if passed, overrides the target node for creation
7934
  @rtype: boolean
7935
  @return: the success of the creation
7936

7937
  """
7938
  info = _GetInstanceInfoText(instance)
7939
  if target_node is None:
7940
    pnode = instance.primary_node
7941
    all_nodes = instance.all_nodes
7942
  else:
7943
    pnode = target_node
7944
    all_nodes = [pnode]
7945

    
7946
  if instance.disk_template in constants.DTS_FILEBASED:
7947
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
7948
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
7949

    
7950
    result.Raise("Failed to create directory '%s' on"
7951
                 " node %s" % (file_storage_dir, pnode))
7952

    
7953
  # Note: this needs to be kept in sync with adding of disks in
7954
  # LUInstanceSetParams
7955
  for idx, device in enumerate(instance.disks):
7956
    if to_skip and idx in to_skip:
7957
      continue
7958
    logging.info("Creating volume %s for instance %s",
7959
                 device.iv_name, instance.name)
7960
    #HARDCODE
7961
    for node in all_nodes:
7962
      f_create = node == pnode
7963
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
7964

    
7965

    
7966
def _RemoveDisks(lu, instance, target_node=None):
7967
  """Remove all disks for an instance.
7968

7969
  This abstracts away some work from `AddInstance()` and
7970
  `RemoveInstance()`. Note that in case some of the devices couldn't
7971
  be removed, the removal will continue with the other ones (compare
7972
  with `_CreateDisks()`).
7973

7974
  @type lu: L{LogicalUnit}
7975
  @param lu: the logical unit on whose behalf we execute
7976
  @type instance: L{objects.Instance}
7977
  @param instance: the instance whose disks we should remove
7978
  @type target_node: string
7979
  @param target_node: used to override the node on which to remove the disks
7980
  @rtype: boolean
7981
  @return: the success of the removal
7982

7983
  """
7984
  logging.info("Removing block devices for instance %s", instance.name)
7985

    
7986
  all_result = True
7987
  for device in instance.disks:
7988
    if target_node:
7989
      edata = [(target_node, device)]
7990
    else:
7991
      edata = device.ComputeNodeTree(instance.primary_node)
7992
    for node, disk in edata:
7993
      lu.cfg.SetDiskID(disk, node)
7994
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
7995
      if msg:
7996
        lu.LogWarning("Could not remove block device %s on node %s,"
7997
                      " continuing anyway: %s", device.iv_name, node, msg)
7998
        all_result = False
7999

    
8000
  if instance.disk_template == constants.DT_FILE:
8001
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
8002
    if target_node:
8003
      tgt = target_node
8004
    else:
8005
      tgt = instance.primary_node
8006
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
8007
    if result.fail_msg:
8008
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
8009
                    file_storage_dir, instance.primary_node, result.fail_msg)
8010
      all_result = False
8011

    
8012
  return all_result
8013

    
8014

    
8015
def _ComputeDiskSizePerVG(disk_template, disks):
8016
  """Compute disk size requirements in the volume group
8017

8018
  """
8019
  def _compute(disks, payload):
8020
    """Universal algorithm.
8021

8022
    """
8023
    vgs = {}
8024
    for disk in disks:
      # accumulate the requested size (plus the per-disk payload, e.g. DRBD
      # metadata) for each volume group
      vgs[disk[constants.IDISK_VG]] = \
        vgs.get(disk[constants.IDISK_VG], 0) + disk[constants.IDISK_SIZE] + \
        payload
8027

    
8028
    return vgs
8029

    
8030
  # Required free disk space as a function of disk and swap space
8031
  req_size_dict = {
8032
    constants.DT_DISKLESS: {},
8033
    constants.DT_PLAIN: _compute(disks, 0),
8034
    # 128 MB are added for drbd metadata for each disk
8035
    constants.DT_DRBD8: _compute(disks, 128),
8036
    constants.DT_FILE: {},
8037
    constants.DT_SHARED_FILE: {},
8038
  }
8039

    
8040
  if disk_template not in req_size_dict:
8041
    raise errors.ProgrammerError("Disk template '%s' size requirement"
8042
                                 " is unknown" % disk_template)
8043

    
8044
  return req_size_dict[disk_template]
8045

    
8046

    
8047
def _ComputeDiskSize(disk_template, disks):
8048
  """Compute disk size requirements in the volume group
8049

8050
  """
8051
  # Required free disk space as a function of disk and swap space
8052
  req_size_dict = {
8053
    constants.DT_DISKLESS: None,
8054
    constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
8055
    # 128 MB are added for drbd metadata for each disk
8056
    constants.DT_DRBD8: sum(d[constants.IDISK_SIZE] + 128 for d in disks),
8057
    constants.DT_FILE: None,
8058
    constants.DT_SHARED_FILE: 0,
8059
    constants.DT_BLOCK: 0,
8060
  }
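  # Illustrative example: two 512 MB disks need 1024 MB with DT_PLAIN and
  # 1280 MB with DT_DRBD8 (an extra 128 MB of DRBD metadata per disk).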
8061

    
8062
  if disk_template not in req_size_dict:
8063
    raise errors.ProgrammerError("Disk template '%s' size requirement"
8064
                                 " is unknown" % disk_template)
8065

    
8066
  return req_size_dict[disk_template]
8067

    
8068

    
8069
def _FilterVmNodes(lu, nodenames):
8070
  """Filters out non-vm_capable nodes from a list.
8071

8072
  @type lu: L{LogicalUnit}
8073
  @param lu: the logical unit for which we check
8074
  @type nodenames: list
8075
  @param nodenames: the list of nodes on which we should check
8076
  @rtype: list
8077
  @return: the list of vm-capable nodes
8078

8079
  """
8080
  vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
8081
  return [name for name in nodenames if name not in vm_nodes]
8082

    
8083

    
8084
def _CheckHVParams(lu, nodenames, hvname, hvparams):
8085
  """Hypervisor parameter validation.
8086

8087
  This function abstract the hypervisor parameter validation to be
8088
  used in both instance create and instance modify.
8089

8090
  @type lu: L{LogicalUnit}
8091
  @param lu: the logical unit for which we check
8092
  @type nodenames: list
8093
  @param nodenames: the list of nodes on which we should check
8094
  @type hvname: string
8095
  @param hvname: the name of the hypervisor we should use
8096
  @type hvparams: dict
8097
  @param hvparams: the parameters which we need to check
8098
  @raise errors.OpPrereqError: if the parameters are not valid
8099

8100
  """
8101
  nodenames = _FilterVmNodes(lu, nodenames)
8102
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
8103
                                                  hvname,
8104
                                                  hvparams)
8105
  for node in nodenames:
8106
    info = hvinfo[node]
8107
    if info.offline:
8108
      continue
8109
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
8110

    
8111

    
8112
def _CheckOSParams(lu, required, nodenames, osname, osparams):
8113
  """OS parameters validation.
8114

8115
  @type lu: L{LogicalUnit}
8116
  @param lu: the logical unit for which we check
8117
  @type required: boolean
8118
  @param required: whether the validation should fail if the OS is not
8119
      found
8120
  @type nodenames: list
8121
  @param nodenames: the list of nodes on which we should check
8122
  @type osname: string
8123
  @param osname: the name of the OS we should use
8124
  @type osparams: dict
8125
  @param osparams: the parameters which we need to check
8126
  @raise errors.OpPrereqError: if the parameters are not valid
8127

8128
  """
8129
  nodenames = _FilterVmNodes(lu, nodenames)
8130
  result = lu.rpc.call_os_validate(required, nodenames, osname,
8131
                                   [constants.OS_VALIDATE_PARAMETERS],
8132
                                   osparams)
8133
  for node, nres in result.items():
8134
    # we don't check for offline cases since this should be run only
8135
    # against the master node and/or an instance's nodes
8136
    nres.Raise("OS Parameters validation failed on node %s" % node)
8137
    if not nres.payload:
8138
      lu.LogInfo("OS %s not found on node %s, validation skipped",
8139
                 osname, node)
8140

    
8141

    
8142
class LUInstanceCreate(LogicalUnit):
8143
  """Create an instance.
8144

8145
  """
8146
  HPATH = "instance-add"
8147
  HTYPE = constants.HTYPE_INSTANCE
8148
  REQ_BGL = False
8149

    
8150
  def CheckArguments(self):
8151
    """Check arguments.
8152

8153
    """
8154
    # do not require name_check to ease forward/backward compatibility
8155
    # for tools
8156
    if self.op.no_install and self.op.start:
8157
      self.LogInfo("No-installation mode selected, disabling startup")
8158
      self.op.start = False
8159
    # validate/normalize the instance name
8160
    self.op.instance_name = \
8161
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
8162

    
8163
    if self.op.ip_check and not self.op.name_check:
8164
      # TODO: make the ip check more flexible and not depend on the name check
8165
      raise errors.OpPrereqError("Cannot do IP address check without a name"
8166
                                 " check", errors.ECODE_INVAL)
8167

    
8168
    # check nics' parameter names
8169
    for nic in self.op.nics:
8170
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
8171

    
8172
    # check disks. parameter names and consistent adopt/no-adopt strategy
8173
    has_adopt = has_no_adopt = False
8174
    for disk in self.op.disks:
8175
      utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
8176
      if constants.IDISK_ADOPT in disk:
8177
        has_adopt = True
8178
      else:
8179
        has_no_adopt = True
8180
    if has_adopt and has_no_adopt:
8181
      raise errors.OpPrereqError("Either all disks are adopted or none is",
8182
                                 errors.ECODE_INVAL)
8183
    if has_adopt:
8184
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
8185
        raise errors.OpPrereqError("Disk adoption is not supported for the"
8186
                                   " '%s' disk template" %
8187
                                   self.op.disk_template,
8188
                                   errors.ECODE_INVAL)
8189
      if self.op.iallocator is not None:
8190
        raise errors.OpPrereqError("Disk adoption not allowed with an"
8191
                                   " iallocator script", errors.ECODE_INVAL)
8192
      if self.op.mode == constants.INSTANCE_IMPORT:
8193
        raise errors.OpPrereqError("Disk adoption not allowed for"
8194
                                   " instance import", errors.ECODE_INVAL)
8195
    else:
8196
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
8197
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
8198
                                   " but no 'adopt' parameter given" %
8199
                                   self.op.disk_template,
8200
                                   errors.ECODE_INVAL)
8201

    
8202
    self.adopt_disks = has_adopt
8203

    
8204
    # instance name verification
8205
    if self.op.name_check:
8206
      self.hostname1 = netutils.GetHostname(name=self.op.instance_name)
8207
      self.op.instance_name = self.hostname1.name
8208
      # used in CheckPrereq for ip ping check
8209
      self.check_ip = self.hostname1.ip
8210
    else:
8211
      self.check_ip = None
8212

    
8213
    # file storage checks
8214
    if (self.op.file_driver and
8215
        not self.op.file_driver in constants.FILE_DRIVER):
8216
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
8217
                                 self.op.file_driver, errors.ECODE_INVAL)
8218

    
8219
    if self.op.disk_template == constants.DT_FILE:
8220
      opcodes.RequireFileStorage()
8221
    elif self.op.disk_template == constants.DT_SHARED_FILE:
8222
      opcodes.RequireSharedFileStorage()
8223

    
8224
    ### Node/iallocator related checks
8225
    _CheckIAllocatorOrNode(self, "iallocator", "pnode")
8226

    
8227
    if self.op.pnode is not None:
8228
      if self.op.disk_template in constants.DTS_INT_MIRROR:
8229
        if self.op.snode is None:
8230
          raise errors.OpPrereqError("The networked disk templates need"
8231
                                     " a mirror node", errors.ECODE_INVAL)
8232
      elif self.op.snode:
8233
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
8234
                        " template")
8235
        self.op.snode = None
8236

    
8237
    self._cds = _GetClusterDomainSecret()
8238

    
8239
    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
          netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

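  # Editor's illustrative sketch (not part of the original code): the three
  # creation modes checked above roughly correspond to opcodes built like the
  # following.  Field names come from the checks in CheckArguments; all
  # concrete values are invented examples.
  #
  #   opcodes.OpInstanceCreate(instance_name="inst1.example.com",
  #                            mode=constants.INSTANCE_CREATE,
  #                            os_type="debootstrap+default",
  #                            disk_template=constants.DT_DRBD8, ...)
  #
  #   opcodes.OpInstanceCreate(instance_name="inst1.example.com",
  #                            mode=constants.INSTANCE_REMOTE_IMPORT,
  #                            source_handshake=handshake_from_source,
  #                            source_x509_ca=signed_ca_pem,
  #                            source_instance_name="inst1.example.com", ...)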
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(constants.EXPORT_DIR, src_path)

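  # Editorial illustration of the lock sets computed above, assuming an
  # instance "inst1" created on explicitly named nodes:
  #
  #   self.add_locks    == {locking.LEVEL_INSTANCE: "inst1"}
  #   self.needed_locks == {locking.LEVEL_NODE: ["node1", "node2"]}
  #
  # With an iallocator (or an import without a source node) the node level
  # instead becomes locking.ALL_SET, since any node may end up being used.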
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    nics = [n.ToDict() for n in self.nics]
    ial = IAllocator(self.cfg, self.rpc,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=self.op.tags,
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     memory=self.be_full[constants.BE_MEMORY],
                     disks=self.disks,
                     nics=nics,
                     hypervisor=self.op.hypervisor,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)
    if len(ial.result) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.result),
                                  ial.required_nodes), errors.ECODE_FAULT)
    self.op.pnode = ial.result[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))
    if ial.required_nodes == 2:
      self.op.snode = ial.result[1]

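  # Sketch of the allocator round-trip done by _RunAllocator (editorial note;
  # the request/response format is owned by the IAllocator class and the
  # external script).  On success ial.result is a list of node names, e.g.:
  #
  #   ial.required_nodes == 2    # two nodes for mirrored disk templates
  #   ial.result == ["node3.example.com", "node7.example.com"]
  #
  # The first entry becomes the primary node, the optional second one the
  # secondary (self.op.snode).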
  def BuildHooksEnv(self):
8393
    """Build hooks env.
8394

8395
    This runs on master, primary and secondary nodes of the instance.
8396

8397
    """
8398
    env = {
8399
      "ADD_MODE": self.op.mode,
8400
      }
8401
    if self.op.mode == constants.INSTANCE_IMPORT:
8402
      env["SRC_NODE"] = self.op.src_node
8403
      env["SRC_PATH"] = self.op.src_path
8404
      env["SRC_IMAGES"] = self.src_images
8405

    
8406
    env.update(_BuildInstanceHookEnv(
8407
      name=self.op.instance_name,
8408
      primary_node=self.op.pnode,
8409
      secondary_nodes=self.secondaries,
8410
      status=self.op.start,
8411
      os_type=self.op.os_type,
8412
      memory=self.be_full[constants.BE_MEMORY],
8413
      vcpus=self.be_full[constants.BE_VCPUS],
8414
      nics=_NICListToTuple(self, self.nics),
8415
      disk_template=self.op.disk_template,
8416
      disks=[(d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
8417
             for d in self.disks],
8418
      bep=self.be_full,
8419
      hvp=self.hv_full,
8420
      hypervisor_name=self.op.hypervisor,
8421
      tags=self.op.tags,
8422
    ))
8423

    
8424
    return env
8425

    
8426
  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
8434
    """Reads the export information from disk.
8435

8436
    It will override the opcode source node and path with the actual
8437
    information, if these two were not specified before.
8438

8439
    @return: the export information
8440

8441
    """
8442
    assert self.op.mode == constants.INSTANCE_IMPORT
8443

    
8444
    src_node = self.op.src_node
8445
    src_path = self.op.src_path
8446

    
8447
    if src_node is None:
8448
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
8449
      exp_list = self.rpc.call_export_list(locked_nodes)
8450
      found = False
8451
      for node in exp_list:
8452
        if exp_list[node].fail_msg:
8453
          continue
8454
        if src_path in exp_list[node].payload:
8455
          found = True
8456
          self.op.src_node = src_node = node
8457
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
8458
                                                       src_path)
8459
          break
8460
      if not found:
8461
        raise errors.OpPrereqError("No export found for relative path %s" %
8462
                                    src_path, errors.ECODE_INVAL)
8463

    
8464
    _CheckNodeOnline(self, src_node)
8465
    result = self.rpc.call_export_info(src_node, src_path)
8466
    result.Raise("No export or invalid export found in dir %s" % src_path)
8467

    
8468
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
8469
    if not export_info.has_section(constants.INISECT_EXP):
8470
      raise errors.ProgrammerError("Corrupted export config",
8471
                                   errors.ECODE_ENVIRON)
8472

    
8473
    ei_version = export_info.get(constants.INISECT_EXP, "version")
8474
    if (int(ei_version) != constants.EXPORT_VERSION):
8475
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
8476
                                 (ei_version, constants.EXPORT_VERSION),
8477
                                 errors.ECODE_ENVIRON)
8478
    return export_info
8479

    
8480
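  # For orientation (editorial note, not original code): judging by the keys
  # read by _ReadExportInfo above and _ReadExportParams below, the export
  # information is an INI-style file looking roughly like this (section names
  # are the values of constants.INISECT_EXP and constants.INISECT_INS; the
  # exact layout is owned by the export code):
  #
  #   [export]
  #   version = 0
  #   os = <os name>
  #
  #   [instance]
  #   name = inst1.example.com
  #   disk_template = drbd
  #   disk0_size = 10240
  #   disk0_dump = disk0.dump
  #   nic0_mac = aa:00:00:12:34:56
  #   ...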
  def _ReadExportParams(self, einfo):
8481
    """Use export parameters as defaults.
8482

8483
    In case the opcode doesn't specify (as in override) some instance
8484
    parameters, then try to use them from the export information, if
8485
    that declares them.
8486

8487
    """
8488
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
8489

    
8490
    if self.op.disk_template is None:
8491
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
8492
        self.op.disk_template = einfo.get(constants.INISECT_INS,
8493
                                          "disk_template")
8494
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" % " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
8498
      else:
8499
        raise errors.OpPrereqError("No disk template specified and the export"
8500
                                   " is missing the disk_template information",
8501
                                   errors.ECODE_INVAL)
8502

    
8503
    if not self.op.disks:
8504
      disks = []
8505
      # TODO: import the disk iv_name too
8506
      for idx in range(constants.MAX_DISKS):
8507
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
8508
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
8509
          disks.append({constants.IDISK_SIZE: disk_sz})
8510
      self.op.disks = disks
8511
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
8512
        raise errors.OpPrereqError("No disk info specified and the export"
8513
                                   " is missing the disk information",
8514
                                   errors.ECODE_INVAL)
8515

    
8516
    if not self.op.nics:
8517
      nics = []
8518
      for idx in range(constants.MAX_NICS):
8519
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
8520
          ndict = {}
8521
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
8522
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
8523
            ndict[name] = v
8524
          nics.append(ndict)
8525
        else:
8526
          break
8527
      self.op.nics = nics
8528

    
8529
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
8530
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
8531

    
8532
    if (self.op.hypervisor is None and
8533
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
8534
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
8535

    
8536
    if einfo.has_section(constants.INISECT_HYP):
8537
      # use the export parameters but do not override the ones
8538
      # specified by the user
8539
      for name, value in einfo.items(constants.INISECT_HYP):
8540
        if name not in self.op.hvparams:
8541
          self.op.hvparams[name] = value
8542

    
8543
    if einfo.has_section(constants.INISECT_BEP):
8544
      # use the parameters, without overriding
8545
      for name, value in einfo.items(constants.INISECT_BEP):
8546
        if name not in self.op.beparams:
8547
          self.op.beparams[name] = value
8548
    else:
8549
      # try to read the parameters old style, from the main section
8550
      for name in constants.BES_PARAMETERS:
8551
        if (name not in self.op.beparams and
8552
            einfo.has_option(constants.INISECT_INS, name)):
8553
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
8554

    
8555
    if einfo.has_section(constants.INISECT_OSP):
8556
      # use the parameters, without overriding
8557
      for name, value in einfo.items(constants.INISECT_OSP):
8558
        if name not in self.op.osparams:
8559
          self.op.osparams[name] = value
8560

    
8561
  def _RevertToDefaults(self, cluster):
8562
    """Revert the instance parameters to the default values.
8563

8564
    """
8565
    # hvparams
8566
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
8567
    for name in self.op.hvparams.keys():
8568
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
8569
        del self.op.hvparams[name]
8570
    # beparams
8571
    be_defs = cluster.SimpleFillBE({})
8572
    for name in self.op.beparams.keys():
8573
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
8574
        del self.op.beparams[name]
8575
    # nic params
8576
    nic_defs = cluster.SimpleFillNIC({})
8577
    for nic in self.op.nics:
8578
      for name in constants.NICS_PARAMETERS:
8579
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
8580
          del nic[name]
8581
    # osparams
8582
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
8583
    for name in self.op.osparams.keys():
8584
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
8585
        del self.op.osparams[name]
8586

    
8587
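  # Editorial illustration of the "identify_defaults" behaviour implemented
  # above: parameters that merely repeat the cluster-wide default are removed
  # from the opcode, so the new instance keeps tracking the cluster default
  # instead of pinning the current value.  With made-up numbers:
  #
  #   cluster default beparams:  {"memory": 128, "vcpus": 1}
  #   self.op.beparams before:   {"memory": 128, "vcpus": 4}
  #   self.op.beparams after:    {"vcpus": 4}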
  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined")
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

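  # Example of the resulting path (editorial note; the base directory comes
  # from the cluster configuration, the value below is only an assumption):
  #
  #   utils.PathJoin("/srv/ganeti/file-storage", self.op.file_storage_dir,
  #                  self.op.instance_name)
  #   -> "/srv/ganeti/file-storage/mygroup/inst1.example.com"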
  def CheckPrereq(self):
8616
    """Check prerequisites.
8617

8618
    """
8619
    self._CalculateFileStorageDir()
8620

    
8621
    if self.op.mode == constants.INSTANCE_IMPORT:
8622
      export_info = self._ReadExportInfo()
8623
      self._ReadExportParams(export_info)
8624

    
8625
    if (not self.cfg.GetVGName() and
8626
        self.op.disk_template not in constants.DTS_NOT_LVM):
8627
      raise errors.OpPrereqError("Cluster does not support lvm-based"
8628
                                 " instances", errors.ECODE_STATE)
8629

    
8630
    if (self.op.hypervisor is None or
8631
        self.op.hypervisor == constants.VALUE_AUTO):
8632
      self.op.hypervisor = self.cfg.GetHypervisorType()
8633

    
8634
    cluster = self.cfg.GetClusterInfo()
8635
    enabled_hvs = cluster.enabled_hypervisors
8636
    if self.op.hypervisor not in enabled_hvs:
8637
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
8638
                                 " cluster (%s)" % (self.op.hypervisor,
8639
                                  ",".join(enabled_hvs)),
8640
                                 errors.ECODE_STATE)
8641

    
8642
    # Check tag validity
8643
    for tag in self.op.tags:
8644
      objects.TaggableObject.ValidateTag(tag)
8645

    
8646
    # check hypervisor parameter syntax (locally)
8647
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
8648
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
8649
                                      self.op.hvparams)
8650
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
8651
    hv_type.CheckParameterSyntax(filled_hvp)
8652
    self.hv_full = filled_hvp
8653
    # check that we don't specify global parameters on an instance
8654
    _CheckGlobalHvParams(self.op.hvparams)
8655

    
8656
    # fill and remember the beparams dict
8657
    default_beparams = cluster.beparams[constants.PP_DEFAULT]
8658
    for param, value in self.op.beparams.iteritems():
8659
      if value == constants.VALUE_AUTO:
8660
        self.op.beparams[param] = default_beparams[param]
8661
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
8662
    self.be_full = cluster.SimpleFillBE(self.op.beparams)
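    # Editorial example of the VALUE_AUTO handling above (invented values): a
    # request of {"memory": "auto", "vcpus": 2} first has "memory" replaced
    # by the cluster default and is then merged by SimpleFillBE, so
    # self.be_full ends up as a fully populated beparams dict.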
8663

    
8664
    # build os parameters
8665
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
8666

    
8667
    # now that hvp/bep are in final format, let's reset to defaults,
8668
    # if told to do so
8669
    if self.op.identify_defaults:
8670
      self._RevertToDefaults(cluster)
8671

    
8672
    # NIC buildup
8673
    self.nics = []
8674
    for idx, nic in enumerate(self.op.nics):
8675
      nic_mode_req = nic.get(constants.INIC_MODE, None)
8676
      nic_mode = nic_mode_req
8677
      if nic_mode is None or nic_mode == constants.VALUE_AUTO:
8678
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
8679

    
8680
      # in routed mode, for the first nic, the default ip is 'auto'
8681
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
8682
        default_ip_mode = constants.VALUE_AUTO
8683
      else:
8684
        default_ip_mode = constants.VALUE_NONE
8685

    
8686
      # ip validity checks
8687
      ip = nic.get(constants.INIC_IP, default_ip_mode)
8688
      if ip is None or ip.lower() == constants.VALUE_NONE:
8689
        nic_ip = None
8690
      elif ip.lower() == constants.VALUE_AUTO:
8691
        if not self.op.name_check:
8692
          raise errors.OpPrereqError("IP address set to auto but name checks"
8693
                                     " have been skipped",
8694
                                     errors.ECODE_INVAL)
8695
        nic_ip = self.hostname1.ip
8696
      else:
8697
        if not netutils.IPAddress.IsValid(ip):
8698
          raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
8699
                                     errors.ECODE_INVAL)
8700
        nic_ip = ip
8701

    
8702
      # TODO: check the ip address for uniqueness
8703
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
8704
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
8705
                                   errors.ECODE_INVAL)
8706

    
8707
      # MAC address verification
8708
      mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
8709
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8710
        mac = utils.NormalizeAndValidateMac(mac)
8711

    
8712
        try:
8713
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
8714
        except errors.ReservationError:
8715
          raise errors.OpPrereqError("MAC address %s already in use"
8716
                                     " in cluster" % mac,
8717
                                     errors.ECODE_NOTUNIQUE)
8718

    
8719
      #  Build nic parameters
8720
      link = nic.get(constants.INIC_LINK, None)
8721
      if link == constants.VALUE_AUTO:
8722
        link = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_LINK]
8723
      nicparams = {}
8724
      if nic_mode_req:
8725
        nicparams[constants.NIC_MODE] = nic_mode
8726
      if link:
8727
        nicparams[constants.NIC_LINK] = link
8728

    
8729
      check_params = cluster.SimpleFillNIC(nicparams)
8730
      objects.NIC.CheckParameterSyntax(check_params)
8731
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
8732

    
8733
    # disk checks/pre-build
8734
    default_vg = self.cfg.GetVGName()
8735
    self.disks = []
8736
    for disk in self.op.disks:
8737
      mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
8738
      if mode not in constants.DISK_ACCESS_SET:
8739
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
8740
                                   mode, errors.ECODE_INVAL)
8741
      size = disk.get(constants.IDISK_SIZE, None)
8742
      if size is None:
8743
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
8744
      try:
8745
        size = int(size)
8746
      except (TypeError, ValueError):
8747
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
8748
                                   errors.ECODE_INVAL)
8749

    
8750
      data_vg = disk.get(constants.IDISK_VG, default_vg)
8751
      new_disk = {
8752
        constants.IDISK_SIZE: size,
8753
        constants.IDISK_MODE: mode,
8754
        constants.IDISK_VG: data_vg,
8755
        constants.IDISK_METAVG: disk.get(constants.IDISK_METAVG, data_vg),
8756
        }
8757
      if constants.IDISK_ADOPT in disk:
8758
        new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
8759
      self.disks.append(new_disk)
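    # Illustration (editorial note): each user-supplied disk dict is
    # normalised by the loop above into a fixed set of keys, e.g. an input of
    #   {"size": "10240", "vg": "xenvg"}
    # would become
    #   {constants.IDISK_SIZE: 10240, constants.IDISK_MODE: "rw",
    #    constants.IDISK_VG: "xenvg", constants.IDISK_METAVG: "xenvg"}
    # assuming constants.DISK_RDWR is the "rw" access mode; the example
    # values are invented.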
8760

    
8761
    if self.op.mode == constants.INSTANCE_IMPORT:
8762
      disk_images = []
8763
      for idx in range(len(self.disks)):
8764
        option = "disk%d_dump" % idx
8765
        if export_info.has_option(constants.INISECT_INS, option):
8766
          # FIXME: are the old OSes, disk sizes, etc. useful?
8767
          export_name = export_info.get(constants.INISECT_INS, option)
8768
          image = utils.PathJoin(self.op.src_path, export_name)
8769
          disk_images.append(image)
8770
        else:
8771
          disk_images.append(False)
8772

    
8773
      self.src_images = disk_images
8774

    
8775
      old_name = export_info.get(constants.INISECT_INS, "name")
8776
      if self.op.instance_name == old_name:
8777
        for idx, nic in enumerate(self.nics):
8778
          if nic.mac == constants.VALUE_AUTO:
8779
            nic_mac_ini = "nic%d_mac" % idx
8780
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
8781

    
8782
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
8783

    
8784
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
8785
    if self.op.ip_check:
8786
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
8787
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
8788
                                   (self.check_ip, self.op.instance_name),
8789
                                   errors.ECODE_NOTUNIQUE)
8790

    
8791
    #### mac address generation
8792
    # By generating here the mac address both the allocator and the hooks get
8793
    # the real final mac address rather than the 'auto' or 'generate' value.
8794
    # There is a race condition between the generation and the instance object
8795
    # creation, which means that we know the mac is valid now, but we're not
8796
    # sure it will be when we actually add the instance. If things go bad
8797
    # adding the instance will abort because of a duplicate mac, and the
8798
    # creation job will fail.
8799
    for nic in self.nics:
8800
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8801
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
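    # Added note: GenerateMAC is expected to both pick an unused address and
    # reserve it against this job's ec_id, mirroring the explicit ReserveMAC
    # call done earlier for user-supplied addresses; the reservation only
    # becomes permanent once the instance is added to the configuration.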
8802

    
8803
    #### allocator run
8804

    
8805
    if self.op.iallocator is not None:
8806
      self._RunAllocator()
8807

    
8808
    #### node related checks
8809

    
8810
    # check primary node
8811
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
8812
    assert self.pnode is not None, \
8813
      "Cannot retrieve locked node %s" % self.op.pnode
8814
    if pnode.offline:
8815
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
8816
                                 pnode.name, errors.ECODE_STATE)
8817
    if pnode.drained:
8818
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
8819
                                 pnode.name, errors.ECODE_STATE)
8820
    if not pnode.vm_capable:
8821
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
8822
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
8823

    
8824
    self.secondaries = []
8825

    
8826
    # mirror node verification
8827
    if self.op.disk_template in constants.DTS_INT_MIRROR:
8828
      if self.op.snode == pnode.name:
8829
        raise errors.OpPrereqError("The secondary node cannot be the"
8830
                                   " primary node", errors.ECODE_INVAL)
8831
      _CheckNodeOnline(self, self.op.snode)
8832
      _CheckNodeNotDrained(self, self.op.snode)
8833
      _CheckNodeVmCapable(self, self.op.snode)
8834
      self.secondaries.append(self.op.snode)
8835

    
8836
    nodenames = [pnode.name] + self.secondaries
8837

    
8838
    if not self.adopt_disks:
8839
      # Check lv size requirements, if not adopting
8840
      req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
8841
      _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
8842

    
8843
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
8844
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
8845
                                disk[constants.IDISK_ADOPT])
8846
                     for disk in self.disks])
8847
      if len(all_lvs) != len(self.disks):
8848
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
8849
                                   errors.ECODE_INVAL)
8850
      for lv_name in all_lvs:
8851
        try:
8852
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
8853
          # to ReserveLV uses the same syntax
8854
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
8855
        except errors.ReservationError:
8856
          raise errors.OpPrereqError("LV named %s used by another instance" %
8857
                                     lv_name, errors.ECODE_NOTUNIQUE)
8858

    
8859
      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
8860
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
8861

    
8862
      node_lvs = self.rpc.call_lv_list([pnode.name],
8863
                                       vg_names.payload.keys())[pnode.name]
8864
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
8865
      node_lvs = node_lvs.payload
8866

    
8867
      delta = all_lvs.difference(node_lvs.keys())
8868
      if delta:
8869
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
8870
                                   utils.CommaJoin(delta),
8871
                                   errors.ECODE_INVAL)
8872
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
8873
      if online_lvs:
8874
        raise errors.OpPrereqError("Online logical volumes found, cannot"
8875
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
8876
                                   errors.ECODE_STATE)
8877
      # update the size of disk based on what is found
8878
      for dsk in self.disks:
8879
        dsk[constants.IDISK_SIZE] = \
8880
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
8881
                                        dsk[constants.IDISK_ADOPT])][0]))
8882

    
8883
    elif self.op.disk_template == constants.DT_BLOCK:
8884
      # Normalize and de-duplicate device paths
8885
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
8886
                       for disk in self.disks])
8887
      if len(all_disks) != len(self.disks):
8888
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
8889
                                   errors.ECODE_INVAL)
8890
      baddisks = [d for d in all_disks
8891
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
8892
      if baddisks:
8893
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
8894
                                   " cannot be adopted" %
8895
                                   (", ".join(baddisks),
8896
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
8897
                                   errors.ECODE_INVAL)
8898

    
8899
      node_disks = self.rpc.call_bdev_sizes([pnode.name],
8900
                                            list(all_disks))[pnode.name]
8901
      node_disks.Raise("Cannot get block device information from node %s" %
8902
                       pnode.name)
8903
      node_disks = node_disks.payload
8904
      delta = all_disks.difference(node_disks.keys())
8905
      if delta:
8906
        raise errors.OpPrereqError("Missing block device(s): %s" %
8907
                                   utils.CommaJoin(delta),
8908
                                   errors.ECODE_INVAL)
8909
      for dsk in self.disks:
8910
        dsk[constants.IDISK_SIZE] = \
8911
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
8912

    
8913
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
8914

    
8915
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
8916
    # check OS parameters (remotely)
8917
    _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
8918

    
8919
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
8920

    
8921
    # memory check on primary node
8922
    if self.op.start:
8923
      _CheckNodeFreeMemory(self, self.pnode.name,
8924
                           "creating instance %s" % self.op.instance_name,
8925
                           self.be_full[constants.BE_MEMORY],
8926
                           self.op.hypervisor)
8927

    
8928
    self.dry_run_result = list(nodenames)
8929

    
8930
  def Exec(self, feedback_fn):
8931
    """Create and add the instance to the cluster.
8932

8933
    """
8934
    instance = self.op.instance_name
8935
    pnode_name = self.pnode.name
8936

    
8937
    ht_kind = self.op.hypervisor
8938
    if ht_kind in constants.HTS_REQ_PORT:
8939
      network_port = self.cfg.AllocatePort()
8940
    else:
8941
      network_port = None
8942

    
8943
    disks = _GenerateDiskTemplate(self,
8944
                                  self.op.disk_template,
8945
                                  instance, pnode_name,
8946
                                  self.secondaries,
8947
                                  self.disks,
8948
                                  self.instance_file_storage_dir,
8949
                                  self.op.file_driver,
8950
                                  0,
8951
                                  feedback_fn)
8952

    
8953
    iobj = objects.Instance(name=instance, os=self.op.os_type,
8954
                            primary_node=pnode_name,
8955
                            nics=self.nics, disks=disks,
8956
                            disk_template=self.op.disk_template,
8957
                            admin_up=False,
8958
                            network_port=network_port,
8959
                            beparams=self.op.beparams,
8960
                            hvparams=self.op.hvparams,
8961
                            hypervisor=self.op.hypervisor,
8962
                            osparams=self.op.osparams,
8963
                            )
8964

    
8965
    if self.op.tags:
8966
      for tag in self.op.tags:
8967
        iobj.AddTag(tag)
8968

    
8969
    if self.adopt_disks:
8970
      if self.op.disk_template == constants.DT_PLAIN:
8971
        # rename LVs to the newly-generated names; we need to construct
8972
        # 'fake' LV disks with the old data, plus the new unique_id
8973
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
8974
        rename_to = []
8975
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
8976
          rename_to.append(t_dsk.logical_id)
8977
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
8978
          self.cfg.SetDiskID(t_dsk, pnode_name)
8979
        result = self.rpc.call_blockdev_rename(pnode_name,
8980
                                               zip(tmp_disks, rename_to))
8981
        result.Raise("Failed to rename adopted LVs")
8982
    else:
8983
      feedback_fn("* creating instance disks...")
8984
      try:
8985
        _CreateDisks(self, iobj)
8986
      except errors.OpExecError:
8987
        self.LogWarning("Device creation failed, reverting...")
8988
        try:
8989
          _RemoveDisks(self, iobj)
8990
        finally:
8991
          self.cfg.ReleaseDRBDMinors(instance)
8992
          raise
8993

    
8994
    feedback_fn("adding instance %s to cluster config" % instance)
8995

    
8996
    self.cfg.AddInstance(iobj, self.proc.GetECId())
8997

    
8998
    # Declare that we don't want to remove the instance lock anymore, as we've
8999
    # added the instance to the config
9000
    del self.remove_locks[locking.LEVEL_INSTANCE]
9001

    
9002
    if self.op.mode == constants.INSTANCE_IMPORT:
9003
      # Release unused nodes
9004
      _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
9005
    else:
9006
      # Release all nodes
9007
      _ReleaseLocks(self, locking.LEVEL_NODE)
9008

    
9009
    disk_abort = False
9010
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
9011
      feedback_fn("* wiping instance disks...")
9012
      try:
9013
        _WipeDisks(self, iobj)
9014
      except errors.OpExecError, err:
9015
        logging.exception("Wiping disks failed")
9016
        self.LogWarning("Wiping instance disks failed (%s)", err)
9017
        disk_abort = True
9018

    
9019
    if disk_abort:
9020
      # Something is already wrong with the disks, don't do anything else
9021
      pass
9022
    elif self.op.wait_for_sync:
9023
      disk_abort = not _WaitForSync(self, iobj)
9024
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
9025
      # make sure the disks are not degraded (still sync-ing is ok)
9026
      feedback_fn("* checking mirrors status")
9027
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
9028
    else:
9029
      disk_abort = False
9030

    
9031
    if disk_abort:
9032
      _RemoveDisks(self, iobj)
9033
      self.cfg.RemoveInstance(iobj.name)
9034
      # Make sure the instance lock gets removed
9035
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
9036
      raise errors.OpExecError("There are some degraded disks for"
9037
                               " this instance")
9038

    
9039
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
9040
      if self.op.mode == constants.INSTANCE_CREATE:
9041
        if not self.op.no_install:
9042
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
9043
                        not self.op.wait_for_sync)
9044
          if pause_sync:
9045
            feedback_fn("* pausing disk sync to install instance OS")
9046
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
9047
                                                              iobj.disks, True)
9048
            for idx, success in enumerate(result.payload):
9049
              if not success:
9050
                logging.warn("pause-sync of instance %s for disk %d failed",
9051
                             instance, idx)
9052

    
9053
          feedback_fn("* running the instance OS create scripts...")
9054
          # FIXME: pass debug option from opcode to backend
9055
          os_add_result = \
9056
            self.rpc.call_instance_os_add(pnode_name, iobj, False,
9057
                                          self.op.debug_level)
9058
          if pause_sync:
9059
            feedback_fn("* resuming disk sync")
9060
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
9061
                                                              iobj.disks, False)
9062
            for idx, success in enumerate(result.payload):
9063
              if not success:
9064
                logging.warn("resume-sync of instance %s for disk %d failed",
9065
                             instance, idx)
9066

    
9067
          os_add_result.Raise("Could not add os for instance %s"
9068
                              " on node %s" % (instance, pnode_name))
9069

    
9070
      elif self.op.mode == constants.INSTANCE_IMPORT:
9071
        feedback_fn("* running the instance OS import scripts...")
9072

    
9073
        transfers = []
9074

    
9075
        for idx, image in enumerate(self.src_images):
9076
          if not image:
9077
            continue
9078

    
9079
          # FIXME: pass debug option from opcode to backend
9080
          dt = masterd.instance.DiskTransfer("disk/%s" % idx,
9081
                                             constants.IEIO_FILE, (image, ),
9082
                                             constants.IEIO_SCRIPT,
9083
                                             (iobj.disks[idx], idx),
9084
                                             None)
9085
          transfers.append(dt)
9086

    
9087
        import_result = \
9088
          masterd.instance.TransferInstanceData(self, feedback_fn,
9089
                                                self.op.src_node, pnode_name,
9090
                                                self.pnode.secondary_ip,
9091
                                                iobj, transfers)
9092
        if not compat.all(import_result):
9093
          self.LogWarning("Some disks for instance %s on node %s were not"
9094
                          " imported successfully" % (instance, pnode_name))
9095

    
9096
      elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
9097
        feedback_fn("* preparing remote import...")
9098
        # The source cluster will stop the instance before attempting to make a
9099
        # connection. In some cases stopping an instance can take a long time,
9100
        # hence the shutdown timeout is added to the connection timeout.
9101
        connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
9102
                           self.op.source_shutdown_timeout)
9103
        timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
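        # Worked example of the timeout computed above (illustrative numbers
        # only): with a RIE_CONNECT_TIMEOUT of 60s and a requested
        # source_shutdown_timeout of 120s, this side waits up to 180s for the
        # source cluster to connect.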
9104

    
9105
        assert iobj.primary_node == self.pnode.name
9106
        disk_results = \
9107
          masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
9108
                                        self.source_x509_ca,
9109
                                        self._cds, timeouts)
9110
        if not compat.all(disk_results):
9111
          # TODO: Should the instance still be started, even if some disks
9112
          # failed to import (valid for local imports, too)?
9113
          self.LogWarning("Some disks for instance %s on node %s were not"
9114
                          " imported successfully" % (instance, pnode_name))
9115

    
9116
        # Run rename script on newly imported instance
9117
        assert iobj.name == instance
9118
        feedback_fn("Running rename script for %s" % instance)
9119
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
9120
                                                   self.source_instance_name,
9121
                                                   self.op.debug_level)
9122
        if result.fail_msg:
9123
          self.LogWarning("Failed to run rename script for %s on node"
9124
                          " %s: %s" % (instance, pnode_name, result.fail_msg))
9125

    
9126
      else:
9127
        # also checked in the prereq part
9128
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
9129
                                     % self.op.mode)
9130

    
9131
    if self.op.start:
9132
      iobj.admin_up = True
9133
      self.cfg.Update(iobj, feedback_fn)
9134
      logging.info("Starting instance %s on node %s", instance, pnode_name)
9135
      feedback_fn("* starting instance...")
9136
      result = self.rpc.call_instance_start(pnode_name, iobj,
9137
                                            None, None, False)
9138
      result.Raise("Could not start instance")
9139

    
9140
    return list(iobj.all_nodes)
9141

    
9142

    
9143
class LUInstanceConsole(NoHooksLU):
9144
  """Connect to an instance's console.
9145

9146
  This is somewhat special in that it returns the command line that
9147
  you need to run on the master node in order to connect to the
9148
  console.
9149

9150
  """
9151
  REQ_BGL = False
9152

    
9153
  def ExpandNames(self):
9154
    self._ExpandAndLockInstance()
9155

    
9156
  def CheckPrereq(self):
9157
    """Check prerequisites.
9158

9159
    This checks that the instance is in the cluster.
9160

9161
    """
9162
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
9163
    assert self.instance is not None, \
9164
      "Cannot retrieve locked instance %s" % self.op.instance_name
9165
    _CheckNodeOnline(self, self.instance.primary_node)
9166

    
9167
  def Exec(self, feedback_fn):
9168
    """Connect to the console of an instance
9169

9170
    """
9171
    instance = self.instance
9172
    node = instance.primary_node
9173

    
9174
    node_insts = self.rpc.call_instance_list([node],
9175
                                             [instance.hypervisor])[node]
9176
    node_insts.Raise("Can't get node information from %s" % node)
9177

    
9178
    if instance.name not in node_insts.payload:
9179
      if instance.admin_up:
9180
        state = constants.INSTST_ERRORDOWN
9181
      else:
9182
        state = constants.INSTST_ADMINDOWN
9183
      raise errors.OpExecError("Instance %s is not running (state %s)" %
9184
                               (instance.name, state))
9185

    
9186
    logging.debug("Connecting to console of %s on %s", instance.name, node)
9187

    
9188
    return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
9189

    
9190

    
9191
def _GetInstanceConsole(cluster, instance):
  """Returns console information for an instance.

  @type cluster: L{objects.Cluster}
  @type instance: L{objects.Instance}
  @rtype: dict

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  # beparams and hvparams are passed separately, to avoid editing the
  # instance and then saving the defaults in the instance itself.
  hvparams = cluster.FillHV(instance)
  beparams = cluster.FillBE(instance)
  console = hyper.GetInstanceConsole(instance, hvparams, beparams)

  assert console.instance == instance.name
  assert console.Validate()

  return console.ToDict()


class LUInstanceReplaceDisks(LogicalUnit):
9213
  """Replace the disks of an instance.
9214

9215
  """
9216
  HPATH = "mirrors-replace"
9217
  HTYPE = constants.HTYPE_INSTANCE
9218
  REQ_BGL = False
9219

    
9220
  def CheckArguments(self):
9221
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
9222
                                  self.op.iallocator)
9223

    
9224
  def ExpandNames(self):
9225
    self._ExpandAndLockInstance()
9226

    
9227
    assert locking.LEVEL_NODE not in self.needed_locks
9228
    assert locking.LEVEL_NODEGROUP not in self.needed_locks
9229

    
9230
    assert self.op.iallocator is None or self.op.remote_node is None, \
9231
      "Conflicting options"
9232

    
9233
    if self.op.remote_node is not None:
9234
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
9235

    
9236
      # Warning: do not remove the locking of the new secondary here
9237
      # unless DRBD8.AddChildren is changed to work in parallel;
9238
      # currently it doesn't since parallel invocations of
9239
      # FindUnusedMinor will conflict
9240
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
9241
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
9242
    else:
9243
      self.needed_locks[locking.LEVEL_NODE] = []
9244
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
9245

    
9246
      if self.op.iallocator is not None:
9247
        # iallocator will select a new node in the same group
9248
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
9249

    
9250
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
9251
                                   self.op.iallocator, self.op.remote_node,
9252
                                   self.op.disks, False, self.op.early_release)
9253

    
9254
    self.tasklets = [self.replacer]
9255

    
9256
  def DeclareLocks(self, level):
9257
    if level == locking.LEVEL_NODEGROUP:
9258
      assert self.op.remote_node is None
9259
      assert self.op.iallocator is not None
9260
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
9261

    
9262
      self.share_locks[locking.LEVEL_NODEGROUP] = 1
9263
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
9264
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
9265

    
9266
    elif level == locking.LEVEL_NODE:
9267
      if self.op.iallocator is not None:
9268
        assert self.op.remote_node is None
9269
        assert not self.needed_locks[locking.LEVEL_NODE]
9270

    
9271
        # Lock member nodes of all locked groups
9272
        self.needed_locks[locking.LEVEL_NODE] = [node_name
9273
          for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
9274
          for node_name in self.cfg.GetNodeGroup(group_uuid).members]
9275
      else:
9276
        self._LockInstancesNodes()
9277

    
9278
  def BuildHooksEnv(self):
9279
    """Build hooks env.
9280

9281
    This runs on the master, the primary and all the secondaries.
9282

9283
    """
9284
    instance = self.replacer.instance
9285
    env = {
9286
      "MODE": self.op.mode,
9287
      "NEW_SECONDARY": self.op.remote_node,
9288
      "OLD_SECONDARY": instance.secondary_nodes[0],
9289
      }
9290
    env.update(_BuildInstanceHookEnvByObject(self, instance))
9291
    return env
9292

    
9293
  def BuildHooksNodes(self):
9294
    """Build hooks nodes.
9295

9296
    """
9297
    instance = self.replacer.instance
9298
    nl = [
9299
      self.cfg.GetMasterNode(),
9300
      instance.primary_node,
9301
      ]
9302
    if self.op.remote_node is not None:
9303
      nl.append(self.op.remote_node)
9304
    return nl, nl
9305

    
9306
  def CheckPrereq(self):
9307
    """Check prerequisites.
9308

9309
    """
9310
    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
9311
            self.op.iallocator is None)
9312

    
9313
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
9314
    if owned_groups:
9315
      _CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
9316

    
9317
    return LogicalUnit.CheckPrereq(self)
9318

    
9319

    
9320
class TLReplaceDisks(Tasklet):
9321
  """Replaces disks for an instance.
9322

9323
  Note: Locking is not within the scope of this class.
9324

9325
  """
9326
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
9327
               disks, delay_iallocator, early_release):
9328
    """Initializes this class.
9329

9330
    """
9331
    Tasklet.__init__(self, lu)
9332

    
9333
    # Parameters
9334
    self.instance_name = instance_name
9335
    self.mode = mode
9336
    self.iallocator_name = iallocator_name
9337
    self.remote_node = remote_node
9338
    self.disks = disks
9339
    self.delay_iallocator = delay_iallocator
9340
    self.early_release = early_release
9341

    
9342
    # Runtime data
9343
    self.instance = None
9344
    self.new_node = None
9345
    self.target_node = None
9346
    self.other_node = None
9347
    self.remote_node_info = None
9348
    self.node_secondary_ip = None
9349

    
9350
  @staticmethod
  def CheckArguments(mode, remote_node, iallocator):
    """Helper function for users of this class.

    """
    # check for valid parameter combination
    if mode == constants.REPLACE_DISK_CHG:
      if remote_node is None and iallocator is None:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given", errors.ECODE_INVAL)

      if remote_node is not None and iallocator is not None:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both", errors.ECODE_INVAL)

    elif remote_node is not None or iallocator is not None:
      # Not replacing the secondary
      raise errors.OpPrereqError("The iallocator and new node options can"
                                 " only be used when changing the"
                                 " secondary node", errors.ECODE_INVAL)

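  # Summary of the argument combinations accepted above (editorial note):
  #
  #   mode                     remote_node  iallocator   result
  #   REPLACE_DISK_CHG         given        -            ok
  #   REPLACE_DISK_CHG         -            given        ok
  #   REPLACE_DISK_CHG         -            -            error (need one)
  #   REPLACE_DISK_CHG         given        given        error (not both)
  #   any other replace mode   either one given          error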
  @staticmethod
9373
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
9374
    """Compute a new secondary node using an IAllocator.
9375

9376
    """
9377
    ial = IAllocator(lu.cfg, lu.rpc,
9378
                     mode=constants.IALLOCATOR_MODE_RELOC,
9379
                     name=instance_name,
9380
                     relocate_from=list(relocate_from))
9381

    
9382
    ial.Run(iallocator_name)
9383

    
9384
    if not ial.success:
9385
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
9386
                                 " %s" % (iallocator_name, ial.info),
9387
                                 errors.ECODE_NORES)
9388

    
9389
    if len(ial.result) != ial.required_nodes:
9390
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
9391
                                 " of nodes (%s), required %s" %
9392
                                 (iallocator_name,
9393
                                  len(ial.result), ial.required_nodes),
9394
                                 errors.ECODE_FAULT)
9395

    
9396
    remote_node_name = ial.result[0]
9397

    
9398
    lu.LogInfo("Selected new secondary for instance '%s': %s",
9399
               instance_name, remote_node_name)
9400

    
9401
    return remote_node_name
9402

    
9403
  def _FindFaultyDisks(self, node_name):
9404
    """Wrapper for L{_FindFaultyInstanceDisks}.
9405

9406
    """
9407
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
9408
                                    node_name, True)
9409

    
9410
  def _CheckDisksActivated(self, instance):
9411
    """Checks if the instance disks are activated.
9412

9413
    @param instance: The instance to check disks
9414
    @return: True if they are activated, False otherwise
9415

9416
    """
9417
    nodes = instance.all_nodes
9418

    
9419
    for idx, dev in enumerate(instance.disks):
9420
      for node in nodes:
9421
        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
9422
        self.cfg.SetDiskID(dev, node)
9423

    
9424
        result = self.rpc.call_blockdev_find(node, dev)
9425

    
9426
        if result.offline:
9427
          continue
9428
        elif result.fail_msg or not result.payload:
9429
          return False
9430

    
9431
    return True
9432

    
9433
  def CheckPrereq(self):
9434
    """Check prerequisites.
9435

9436
    This checks that the instance is in the cluster.
9437

9438
    """
9439
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
9440
    assert instance is not None, \
9441
      "Cannot retrieve locked instance %s" % self.instance_name
9442

    
9443
    if instance.disk_template != constants.DT_DRBD8:
9444
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
9445
                                 " instances", errors.ECODE_INVAL)
9446

    
9447
    if len(instance.secondary_nodes) != 1:
9448
      raise errors.OpPrereqError("The instance has a strange layout,"
9449
                                 " expected one secondary but found %d" %
9450
                                 len(instance.secondary_nodes),
9451
                                 errors.ECODE_FAULT)
9452

    
9453
    if not self.delay_iallocator:
9454
      self._CheckPrereq2()
9455

    
9456
  def _CheckPrereq2(self):
9457
    """Check prerequisites, second part.
9458

9459
    This function should always be part of CheckPrereq. It was separated and is
9460
    now called from Exec because during node evacuation iallocator was only
9461
    called with an unmodified cluster model, not taking planned changes into
9462
    account.
9463

9464
    """
9465
    instance = self.instance
9466
    secondary_node = instance.secondary_nodes[0]
9467

    
9468
    if self.iallocator_name is None:
9469
      remote_node = self.remote_node
9470
    else:
9471
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
9472
                                       instance.name, instance.secondary_nodes)
9473

    
9474
    if remote_node is None:
9475
      self.remote_node_info = None
9476
    else:
9477
      assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
9478
             "Remote node '%s' is not locked" % remote_node
9479

    
9480
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
9481
      assert self.remote_node_info is not None, \
9482
        "Cannot retrieve locked node %s" % remote_node
9483

    
9484
    if remote_node == self.instance.primary_node:
9485
      raise errors.OpPrereqError("The specified node is the primary node of"
9486
                                 " the instance", errors.ECODE_INVAL)
9487

    
9488
    if remote_node == secondary_node:
9489
      raise errors.OpPrereqError("The specified node is already the"
9490
                                 " secondary node of the instance",
9491
                                 errors.ECODE_INVAL)
9492

    
9493
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
9494
                                    constants.REPLACE_DISK_CHG):
9495
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
9496
                                 errors.ECODE_INVAL)
9497

    
9498
    if self.mode == constants.REPLACE_DISK_AUTO:
9499
      if not self._CheckDisksActivated(instance):
9500
        raise errors.OpPrereqError("Please run activate-disks on instance %s"
9501
                                   " first" % self.instance_name,
9502
                                   errors.ECODE_STATE)
9503
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
9504
      faulty_secondary = self._FindFaultyDisks(secondary_node)
9505

    
9506
      if faulty_primary and faulty_secondary:
9507
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
9508
                                   " one node and can not be repaired"
9509
                                   " automatically" % self.instance_name,
9510
                                   errors.ECODE_STATE)
9511

    
9512
      if faulty_primary:
9513
        self.disks = faulty_primary
9514
        self.target_node = instance.primary_node
9515
        self.other_node = secondary_node
9516
        check_nodes = [self.target_node, self.other_node]
9517
      elif faulty_secondary:
9518
        self.disks = faulty_secondary
9519
        self.target_node = secondary_node
9520
        self.other_node = instance.primary_node
9521
        check_nodes = [self.target_node, self.other_node]
9522
      else:
9523
        self.disks = []
9524
        check_nodes = []
9525

    
9526
    else:
9527
      # Non-automatic modes
9528
      if self.mode == constants.REPLACE_DISK_PRI:
9529
        self.target_node = instance.primary_node
9530
        self.other_node = secondary_node
9531
        check_nodes = [self.target_node, self.other_node]
9532

    
9533
      elif self.mode == constants.REPLACE_DISK_SEC:
9534
        self.target_node = secondary_node
9535
        self.other_node = instance.primary_node
9536
        check_nodes = [self.target_node, self.other_node]
9537

    
9538
      elif self.mode == constants.REPLACE_DISK_CHG:
9539
        self.new_node = remote_node
9540
        self.other_node = instance.primary_node
9541
        self.target_node = secondary_node
9542
        check_nodes = [self.new_node, self.other_node]
9543

    
9544
        _CheckNodeNotDrained(self.lu, remote_node)
9545
        _CheckNodeVmCapable(self.lu, remote_node)
9546

    
9547
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
9548
        assert old_node_info is not None
9549
        if old_node_info.offline and not self.early_release:
9550
          # doesn't make sense to delay the release
9551
          self.early_release = True
9552
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
9553
                          " early-release mode", secondary_node)
9554

    
9555
      else:
9556
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
9557
                                     self.mode)
9558

    
9559
      # If not specified all disks should be replaced
9560
      if not self.disks:
9561
        self.disks = range(len(self.instance.disks))
9562

    
9563
    for node in check_nodes:
      _CheckNodeOnline(self.lu, node)

    touched_nodes = frozenset(node_name for node_name in [self.new_node,
                                                          self.other_node,
                                                          self.target_node]
                              if node_name is not None)

    # Release unneeded node locks
    _ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)

    # Release any owned node group
    if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
      _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)

    # Check whether disks are valid
    for disk_idx in self.disks:
      instance.FindDisk(disk_idx)

    # Get secondary node IP addresses
    self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
                                  in self.cfg.GetMultiNodeInfo(touched_nodes))
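    # (Illustrative, hypothetical values: the result is a name -> secondary IP
    # mapping such as {"node1.example.com": "192.0.2.11",
    # "node2.example.com": "192.0.2.12"}, later used to point the DRBD
    # network endpoints at the right addresses.)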
  def Exec(self, feedback_fn):
9587
    """Execute disk replacement.
9588

9589
    This dispatches the disk replacement to the appropriate handler.
9590

9591
    """
9592
    if self.delay_iallocator:
9593
      self._CheckPrereq2()
9594

    
9595
    if __debug__:
9596
      # Verify owned locks before starting operation
9597
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
9598
      assert set(owned_nodes) == set(self.node_secondary_ip), \
9599
          ("Incorrect node locks, owning %s, expected %s" %
9600
           (owned_nodes, self.node_secondary_ip.keys()))
9601

    
9602
      owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
9603
      assert list(owned_instances) == [self.instance_name], \
9604
          "Instance '%s' not locked" % self.instance_name
9605

    
9606
      assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
9607
          "Should not own any node group lock at this point"
9608

    
9609
    if not self.disks:
9610
      feedback_fn("No disks need replacement")
9611
      return
9612

    
9613
    feedback_fn("Replacing disk(s) %s for %s" %
9614
                (utils.CommaJoin(self.disks), self.instance.name))
9615

    
9616
    activate_disks = (not self.instance.admin_up)
9617

    
9618
    # Activate the instance disks if we're replacing them on a down instance
9619
    if activate_disks:
9620
      _StartInstanceDisks(self.lu, self.instance, True)
9621

    
9622
    try:
9623
      # Should we replace the secondary node?
9624
      if self.new_node is not None:
9625
        fn = self._ExecDrbd8Secondary
9626
      else:
9627
        fn = self._ExecDrbd8DiskOnly
9628

    
9629
      result = fn(feedback_fn)
9630
    finally:
9631
      # Deactivate the instance disks if we're replacing them on a
9632
      # down instance
9633
      if activate_disks:
9634
        _SafeShutdownInstanceDisks(self.lu, self.instance)
9635

    
9636
    if __debug__:
9637
      # Verify owned locks
9638
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
9639
      nodes = frozenset(self.node_secondary_ip)
9640
      assert ((self.early_release and not owned_nodes) or
9641
              (not self.early_release and not (set(owned_nodes) - nodes))), \
9642
        ("Not owning the correct locks, early_release=%s, owned=%r,"
9643
         " nodes=%r" % (self.early_release, owned_nodes, nodes))
9644

    
9645
    return result
9646

    
9647
  def _CheckVolumeGroup(self, nodes):
9648
    self.lu.LogInfo("Checking volume groups")
9649

    
9650
    vgname = self.cfg.GetVGName()
9651

    
9652
    # Make sure volume group exists on all involved nodes
9653
    results = self.rpc.call_vg_list(nodes)
9654
    if not results:
9655
      raise errors.OpExecError("Can't list volume groups on the nodes")
9656

    
9657
    for node in nodes:
9658
      res = results[node]
9659
      res.Raise("Error checking node %s" % node)
9660
      if vgname not in res.payload:
9661
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
9662
                                 (vgname, node))
9663

    
9664
  def _CheckDisksExistence(self, nodes):
9665
    # Check disk existence
9666
    for idx, dev in enumerate(self.instance.disks):
9667
      if idx not in self.disks:
9668
        continue
9669

    
9670
      for node in nodes:
9671
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
9672
        self.cfg.SetDiskID(dev, node)
9673

    
9674
        result = self.rpc.call_blockdev_find(node, dev)
9675

    
9676
        msg = result.fail_msg
9677
        if msg or not result.payload:
9678
          if not msg:
9679
            msg = "disk not found"
9680
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
9681
                                   (idx, node, msg))
9682

    
9683
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
9684
    for idx, dev in enumerate(self.instance.disks):
9685
      if idx not in self.disks:
9686
        continue
9687

    
9688
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
9689
                      (idx, node_name))
9690

    
9691
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
9692
                                   ldisk=ldisk):
9693
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
9694
                                 " replace disks for instance %s" %
9695
                                 (node_name, self.instance.name))
9696

    
9697
  def _CreateNewStorage(self, node_name):
9698
    """Create new storage on the primary or secondary node.
9699

9700
    This is only used for same-node replaces, not for changing the
9701
    secondary node, hence we don't want to modify the existing disk.
9702

9703
    """
9704
    iv_names = {}
9705

    
9706
    for idx, dev in enumerate(self.instance.disks):
9707
      if idx not in self.disks:
9708
        continue
9709

    
9710
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
9711

    
9712
      self.cfg.SetDiskID(dev, node_name)
9713

    
9714
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
9715
      names = _GenerateUniqueNames(self.lu, lv_names)
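      # lv_names are only suffixes (e.g. ".disk0_data"/".disk0_meta" for disk
      # index 0); _GenerateUniqueNames is expected to prepend a cluster-unique
      # identifier so the new LVs cannot clash with the ones being replaced.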
9716

    
9717
      vg_data = dev.children[0].logical_id[0]
9718
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
9719
                             logical_id=(vg_data, names[0]))
9720
      vg_meta = dev.children[1].logical_id[0]
9721
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
9722
                             logical_id=(vg_meta, names[1]))
9723

    
9724
      new_lvs = [lv_data, lv_meta]
      old_lvs = [child.Copy() for child in dev.children]
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
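      # e.g. (hypothetical) iv_names["disk/0"] == (drbd_dev,
      #   [old_data_lv, old_meta_lv], [new_data_lv, new_meta_lv]); these
      #   triples drive the detach/rename/attach steps done by the callers.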
9727

    
9728
      # we pass force_create=True to force the LVM creation
9729
      for new_lv in new_lvs:
9730
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
9731
                        _GetInstanceInfoText(self.instance), False)
9732

    
9733
    return iv_names
9734

    
9735
  def _CheckDevices(self, node_name, iv_names):
9736
    for name, (dev, _, _) in iv_names.iteritems():
9737
      self.cfg.SetDiskID(dev, node_name)
9738

    
9739
      result = self.rpc.call_blockdev_find(node_name, dev)
9740

    
9741
      msg = result.fail_msg
9742
      if msg or not result.payload:
9743
        if not msg:
9744
          msg = "disk not found"
9745
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
9746
                                 (name, msg))
9747

    
9748
      if result.payload.is_degraded:
9749
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
9750

    
9751
  def _RemoveOldStorage(self, node_name, iv_names):
9752
    for name, (_, old_lvs, _) in iv_names.iteritems():
9753
      self.lu.LogInfo("Remove logical volumes for %s" % name)
9754

    
9755
      for lv in old_lvs:
9756
        self.cfg.SetDiskID(lv, node_name)
9757

    
9758
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
9759
        if msg:
9760
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
9761
                             hint="remove unused LVs manually")
9762

    
9763
  def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
    """Replace a disk on the primary or secondary for DRBD 8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
9786

    
9787
    # Step: check device activation
9788
    self.lu.LogStep(1, steps_total, "Check device existence")
9789
    self._CheckDisksExistence([self.other_node, self.target_node])
9790
    self._CheckVolumeGroup([self.target_node, self.other_node])
9791

    
9792
    # Step: check other node consistency
9793
    self.lu.LogStep(2, steps_total, "Check peer consistency")
9794
    self._CheckDisksConsistency(self.other_node,
9795
                                self.other_node == self.instance.primary_node,
9796
                                False)
9797

    
9798
    # Step: create new storage
9799
    self.lu.LogStep(3, steps_total, "Allocate new storage")
9800
    iv_names = self._CreateNewStorage(self.target_node)
9801

    
9802
    # Step: for each lv, detach+rename*2+attach
9803
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
9804
    for dev, old_lvs, new_lvs in iv_names.itervalues():
9805
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
9806

    
9807
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
9808
                                                     old_lvs)
9809
      result.Raise("Can't detach drbd from local storage on node"
9810
                   " %s for device %s" % (self.target_node, dev.iv_name))
9811
      #dev.children = []
9812
      #cfg.Update(instance)
9813

    
9814
      # ok, we created the new LVs, so now we know we have the needed
9815
      # storage; as such, we proceed on the target node to rename
9816
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
9817
      # using the assumption that logical_id == physical_id (which in
9818
      # turn is the unique_id on that node)
9819

    
9820
      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
9824

    
9825
      # Build the rename list based on what LVs exist on the node
9826
      rename_old_to_new = []
9827
      for to_ren in old_lvs:
9828
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
9829
        if not result.fail_msg and result.payload:
9830
          # device exists
9831
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
9832

    
9833
      self.lu.LogInfo("Renaming the old LVs on the target node")
9834
      result = self.rpc.call_blockdev_rename(self.target_node,
9835
                                             rename_old_to_new)
9836
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
9837

    
9838
      # Now we rename the new LVs to the old LVs
9839
      self.lu.LogInfo("Renaming the new LVs on the target node")
9840
      rename_new_to_old = [(new, old.physical_id)
9841
                           for old, new in zip(old_lvs, new_lvs)]
9842
      result = self.rpc.call_blockdev_rename(self.target_node,
9843
                                             rename_new_to_old)
9844
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
9845

    
9846
      # Intermediate steps of in memory modifications
9847
      for old, new in zip(old_lvs, new_lvs):
9848
        new.logical_id = old.logical_id
9849
        self.cfg.SetDiskID(new, self.target_node)
9850

    
9851
      # We need to modify old_lvs so that removal later removes the
9852
      # right LVs, not the newly added ones; note that old_lvs is a
9853
      # copy here
9854
      for disk in old_lvs:
9855
        disk.logical_id = ren_fn(disk, temp_suffix)
9856
        self.cfg.SetDiskID(disk, self.target_node)
9857

    
9858
      # Now that the new lvs have the old name, we can add them to the device
9859
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
9860
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
9861
                                                  new_lvs)
9862
      msg = result.fail_msg
      if msg:
        for new_lv in new_lvs:
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
                                               new_lv).fail_msg
          if msg2:
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                               hint=("cleanup manually the unused logical"
                                     " volumes"))
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
9872

    
9873
    cstep = 5
9874
    if self.early_release:
9875
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
9876
      cstep += 1
9877
      self._RemoveOldStorage(self.target_node, iv_names)
9878
      # WARNING: we release both node locks here, do not do other RPCs
9879
      # than WaitForSync to the primary node
9880
      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
9881
                    names=[self.target_node, self.other_node])
9882

    
9883
    # Wait for sync
9884
    # This can fail as the old devices are degraded and _WaitForSync
9885
    # does a combined result over all disks, so we don't check its return value
9886
    self.lu.LogStep(cstep, steps_total, "Sync devices")
9887
    cstep += 1
9888
    _WaitForSync(self.lu, self.instance)
9889

    
9890
    # Check all devices manually
9891
    self._CheckDevices(self.instance.primary_node, iv_names)
9892

    
9893
    # Step: remove old storage
9894
    if not self.early_release:
9895
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
9896
      cstep += 1
9897
      self._RemoveOldStorage(self.target_node, iv_names)
9898

    
9899
  def _ExecDrbd8Secondary(self, feedback_fn):
9900
    """Replace the secondary node for DRBD 8.
9901

9902
    The algorithm for replace is quite complicated:
9903
      - for all disks of the instance:
9904
        - create new LVs on the new node with same names
9905
        - shutdown the drbd device on the old secondary
9906
        - disconnect the drbd network on the primary
9907
        - create the drbd device on the new secondary
9908
        - network attach the drbd on the primary, using an artifice:
9909
          the drbd code for Attach() will connect to the network if it
9910
          finds a device which is connected to the good local disks but
9911
          not network enabled
9912
      - wait for sync across all devices
9913
      - remove all disks from the old secondary
9914

9915
    Failures are not very well handled.
9916

9917
    """
9918
    steps_total = 6
9919

    
9920
    pnode = self.instance.primary_node
9921

    
9922
    # Step: check device activation
9923
    self.lu.LogStep(1, steps_total, "Check device existence")
9924
    self._CheckDisksExistence([self.instance.primary_node])
9925
    self._CheckVolumeGroup([self.instance.primary_node])
9926

    
9927
    # Step: check other node consistency
9928
    self.lu.LogStep(2, steps_total, "Check peer consistency")
9929
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
9930

    
9931
    # Step: create new storage
9932
    self.lu.LogStep(3, steps_total, "Allocate new storage")
9933
    for idx, dev in enumerate(self.instance.disks):
9934
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
9935
                      (self.new_node, idx))
9936
      # we pass force_create=True to force LVM creation
9937
      for new_lv in dev.children:
9938
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
9939
                        _GetInstanceInfoText(self.instance), False)
9940

    
9941
    # Step 4: drbd minors and drbd setup changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
                                        self.instance.name)
    logging.debug("Allocated minors %r", minors)
9949

    
9950
    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("Activating a new drbd on %s for disk/%d" %
                      (self.new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the later activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        assert self.instance.primary_node == o_node2, "Three-node instance?"
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)
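      # Both tuples follow the DRBD8 logical_id layout
      # (node_a, node_b, port, minor_a, minor_b, secret); the "alone" variant
      # deliberately leaves the port unset so the new device comes up without
      # networking until the attach step below.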
9969

    
9970
      iv_names[idx] = (dev, dev.children, new_net_id)
9971
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
9972
                    new_net_id)
9973
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
9974
                              logical_id=new_alone_id,
9975
                              children=dev.children,
9976
                              size=dev.size)
9977
      try:
9978
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
9979
                              _GetInstanceInfoText(self.instance), False)
9980
      except errors.GenericError:
9981
        self.cfg.ReleaseDRBDMinors(self.instance.name)
9982
        raise
9983

    
9984
    # We have new devices; shut down the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
      self.cfg.SetDiskID(dev, self.target_node)
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please cleanup this device manually as"
                                 " soon as possible"))
9994

    
9995
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
9996
    result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
9997
                                               self.instance.disks)[pnode]
9998

    
9999
    msg = result.fail_msg
10000
    if msg:
10001
      # detaches didn't succeed (unlikely)
10002
      self.cfg.ReleaseDRBDMinors(self.instance.name)
10003
      raise errors.OpExecError("Can't detach the disks from the network on"
10004
                               " old node: %s" % (msg,))
10005

    
10006
    # if we managed to detach at least one, we update all the disks of
10007
    # the instance to point to the new secondary
10008
    self.lu.LogInfo("Updating instance configuration")
10009
    for dev, _, new_logical_id in iv_names.itervalues():
10010
      dev.logical_id = new_logical_id
10011
      self.cfg.SetDiskID(dev, self.instance.primary_node)
10012

    
10013
    self.cfg.Update(self.instance, feedback_fn)
10014

    
10015
    # and now perform the drbd attach
10016
    self.lu.LogInfo("Attaching primary drbds to new secondary"
10017
                    " (standalone => connected)")
10018
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
10019
                                            self.new_node],
10020
                                           self.node_secondary_ip,
10021
                                           self.instance.disks,
10022
                                           self.instance.name,
10023
                                           False)
10024
    for to_node, to_result in result.items():
10025
      msg = to_result.fail_msg
10026
      if msg:
10027
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
10028
                           to_node, msg,
10029
                           hint=("please do a gnt-instance info to see the"
10030
                                 " status of disks"))
10031
    cstep = 5
10032
    if self.early_release:
10033
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
10034
      cstep += 1
10035
      self._RemoveOldStorage(self.target_node, iv_names)
10036
      # WARNING: we release all node locks here, do not do other RPCs
10037
      # than WaitForSync to the primary node
10038
      _ReleaseLocks(self.lu, locking.LEVEL_NODE,
10039
                    names=[self.instance.primary_node,
10040
                           self.target_node,
10041
                           self.new_node])
10042

    
10043
    # Wait for sync
10044
    # This can fail as the old devices are degraded and _WaitForSync
10045
    # does a combined result over all disks, so we don't check its return value
10046
    self.lu.LogStep(cstep, steps_total, "Sync devices")
10047
    cstep += 1
10048
    _WaitForSync(self.lu, self.instance)
10049

    
10050
    # Check all devices manually
10051
    self._CheckDevices(self.instance.primary_node, iv_names)
10052

    
10053
    # Step: remove old storage
10054
    if not self.early_release:
10055
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
10056
      self._RemoveOldStorage(self.target_node, iv_names)
10057

    
10058

    
10059
class LURepairNodeStorage(NoHooksLU):
10060
  """Repairs the volume group on a node.
10061

10062
  """
10063
  REQ_BGL = False
10064

    
10065
  def CheckArguments(self):
10066
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
10067

    
10068
    storage_type = self.op.storage_type
10069

    
10070
    if (constants.SO_FIX_CONSISTENCY not in
10071
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
10072
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
10073
                                 " repaired" % storage_type,
10074
                                 errors.ECODE_INVAL)
10075

    
10076
  def ExpandNames(self):
10077
    self.needed_locks = {
10078
      locking.LEVEL_NODE: [self.op.node_name],
10079
      }
10080

    
10081
  def _CheckFaultyDisks(self, instance, node_name):
10082
    """Ensure faulty disks abort the opcode or at least warn."""
10083
    try:
10084
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
10085
                                  node_name, True):
10086
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
10087
                                   " node '%s'" % (instance.name, node_name),
10088
                                   errors.ECODE_STATE)
10089
    except errors.OpPrereqError, err:
10090
      if self.op.ignore_consistency:
10091
        self.proc.LogWarning(str(err.args[0]))
10092
      else:
10093
        raise
10094

    
10095
  def CheckPrereq(self):
10096
    """Check prerequisites.
10097

10098
    """
10099
    # Check whether any instance on this node has faulty disks
10100
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
10101
      if not inst.admin_up:
10102
        continue
10103
      check_nodes = set(inst.all_nodes)
10104
      check_nodes.discard(self.op.node_name)
10105
      for inst_node_name in check_nodes:
10106
        self._CheckFaultyDisks(inst, inst_node_name)
10107

    
10108
  def Exec(self, feedback_fn):
10109
    feedback_fn("Repairing storage unit '%s' on %s ..." %
10110
                (self.op.name, self.op.node_name))
10111

    
10112
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
10113
    result = self.rpc.call_storage_execute(self.op.node_name,
10114
                                           self.op.storage_type, st_args,
10115
                                           self.op.name,
10116
                                           constants.SO_FIX_CONSISTENCY)
10117
    result.Raise("Failed to repair storage unit '%s' on %s" %
10118
                 (self.op.name, self.op.node_name))
10119

    
10120

    
10121
class LUNodeEvacuate(NoHooksLU):
10122
  """Evacuates instances off a list of nodes.
10123

10124
  """
10125
  REQ_BGL = False
10126

    
10127
  def CheckArguments(self):
10128
    _CheckIAllocatorOrNode(self, "iallocator", "remote_node")
10129

    
10130
  def ExpandNames(self):
10131
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
10132

    
10133
    if self.op.remote_node is not None:
10134
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
10135
      assert self.op.remote_node
10136

    
10137
      if self.op.remote_node == self.op.node_name:
10138
        raise errors.OpPrereqError("Can not use evacuated node as a new"
10139
                                   " secondary node", errors.ECODE_INVAL)
10140

    
10141
      if self.op.mode != constants.IALLOCATOR_NEVAC_SEC:
10142
        raise errors.OpPrereqError("Without the use of an iallocator only"
10143
                                   " secondary instances can be evacuated",
10144
                                   errors.ECODE_INVAL)
10145

    
10146
    # Declare locks
10147
    self.share_locks = _ShareAll()
10148
    self.needed_locks = {
10149
      locking.LEVEL_INSTANCE: [],
10150
      locking.LEVEL_NODEGROUP: [],
10151
      locking.LEVEL_NODE: [],
10152
      }
10153

    
10154
    if self.op.remote_node is None:
10155
      # Iallocator will choose any node(s) in the same group
10156
      group_nodes = self.cfg.GetNodeGroupMembersByNodes([self.op.node_name])
10157
    else:
10158
      group_nodes = frozenset([self.op.remote_node])
10159

    
10160
    # Determine nodes to be locked
10161
    self.lock_nodes = set([self.op.node_name]) | group_nodes
10162

    
10163
  def _DetermineInstances(self):
10164
    """Builds list of instances to operate on.
10165

10166
    """
10167
    assert self.op.mode in constants.IALLOCATOR_NEVAC_MODES
10168

    
10169
    if self.op.mode == constants.IALLOCATOR_NEVAC_PRI:
10170
      # Primary instances only
10171
      inst_fn = _GetNodePrimaryInstances
10172
      assert self.op.remote_node is None, \
10173
        "Evacuating primary instances requires iallocator"
10174
    elif self.op.mode == constants.IALLOCATOR_NEVAC_SEC:
10175
      # Secondary instances only
10176
      inst_fn = _GetNodeSecondaryInstances
10177
    else:
10178
      # All instances
10179
      assert self.op.mode == constants.IALLOCATOR_NEVAC_ALL
10180
      inst_fn = _GetNodeInstances
10181

    
10182
    return inst_fn(self.cfg, self.op.node_name)
10183

    
10184
  def DeclareLocks(self, level):
10185
    if level == locking.LEVEL_INSTANCE:
10186
      # Lock instances optimistically, needs verification once node and group
10187
      # locks have been acquired
10188
      self.needed_locks[locking.LEVEL_INSTANCE] = \
10189
        set(i.name for i in self._DetermineInstances())
10190

    
10191
    elif level == locking.LEVEL_NODEGROUP:
10192
      # Lock node groups optimistically, needs verification once nodes have
10193
      # been acquired
10194
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
10195
        self.cfg.GetNodeGroupsFromNodes(self.lock_nodes)
10196

    
10197
    elif level == locking.LEVEL_NODE:
10198
      self.needed_locks[locking.LEVEL_NODE] = self.lock_nodes
10199

    
10200
  def CheckPrereq(self):
10201
    # Verify locks
10202
    owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
10203
    owned_nodes = self.owned_locks(locking.LEVEL_NODE)
10204
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
10205

    
10206
    assert owned_nodes == self.lock_nodes
10207

    
10208
    wanted_groups = self.cfg.GetNodeGroupsFromNodes(owned_nodes)
10209
    if owned_groups != wanted_groups:
10210
      raise errors.OpExecError("Node groups changed since locks were acquired,"
10211
                               " current groups are '%s', used to be '%s'" %
10212
                               (utils.CommaJoin(wanted_groups),
10213
                                utils.CommaJoin(owned_groups)))
10214

    
10215
    # Determine affected instances
10216
    self.instances = self._DetermineInstances()
10217
    self.instance_names = [i.name for i in self.instances]
10218

    
10219
    if set(self.instance_names) != owned_instances:
10220
      raise errors.OpExecError("Instances on node '%s' changed since locks"
10221
                               " were acquired, current instances are '%s',"
10222
                               " used to be '%s'" %
10223
                               (self.op.node_name,
10224
                                utils.CommaJoin(self.instance_names),
10225
                                utils.CommaJoin(owned_instances)))
10226

    
10227
    if self.instance_names:
10228
      self.LogInfo("Evacuating instances from node '%s': %s",
10229
                   self.op.node_name,
10230
                   utils.CommaJoin(utils.NiceSort(self.instance_names)))
10231
    else:
10232
      self.LogInfo("No instances to evacuate from node '%s'",
10233
                   self.op.node_name)
10234

    
10235
    if self.op.remote_node is not None:
10236
      for i in self.instances:
10237
        if i.primary_node == self.op.remote_node:
10238
          raise errors.OpPrereqError("Node %s is the primary node of"
10239
                                     " instance %s, cannot use it as"
10240
                                     " secondary" %
10241
                                     (self.op.remote_node, i.name),
10242
                                     errors.ECODE_INVAL)
10243

    
10244
  def Exec(self, feedback_fn):
10245
    assert (self.op.iallocator is not None) ^ (self.op.remote_node is not None)
10246

    
10247
    if not self.instance_names:
10248
      # No instances to evacuate
10249
      jobs = []
10250

    
10251
    elif self.op.iallocator is not None:
10252
      # TODO: Implement relocation to other group
10253
      ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_NODE_EVAC,
10254
                       evac_mode=self.op.mode,
10255
                       instances=list(self.instance_names))
10256

    
10257
      ial.Run(self.op.iallocator)
10258

    
10259
      if not ial.success:
10260
        raise errors.OpPrereqError("Can't compute node evacuation using"
10261
                                   " iallocator '%s': %s" %
10262
                                   (self.op.iallocator, ial.info),
10263
                                   errors.ECODE_NORES)
10264

    
10265
      jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, True)
10266

    
10267
    elif self.op.remote_node is not None:
10268
      assert self.op.mode == constants.IALLOCATOR_NEVAC_SEC
10269
      jobs = [
10270
        [opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
10271
                                        remote_node=self.op.remote_node,
10272
                                        disks=[],
10273
                                        mode=constants.REPLACE_DISK_CHG,
10274
                                        early_release=self.op.early_release)]
10275
        for instance_name in self.instance_names
10276
        ]
10277

    
10278
    else:
10279
      raise errors.ProgrammerError("No iallocator or remote node")
10280

    
10281
    return ResultWithJobs(jobs)
10282

    
10283

    
10284
def _SetOpEarlyRelease(early_release, op):
  """Sets C{early_release} flag on opcodes if available.

  """
  try:
    op.early_release = early_release
  except AttributeError:
    assert not isinstance(op, opcodes.OpInstanceReplaceDisks)

  return op
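# Usage sketch (hypothetical values): the node-evacuate/change-group paths map
# this function over freshly loaded opcodes, e.g.
#   op = _SetOpEarlyRelease(True, opcodes.OpInstanceReplaceDisks(
#          instance_name="inst1.example.com", mode=constants.REPLACE_DISK_CHG,
#          remote_node="node3.example.com", disks=[]))
# Opcodes lacking an early_release slot are simply returned unchanged.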
10294

    
10295

    
10296
def _NodeEvacDest(use_nodes, group, nodes):
  """Returns group or nodes depending on caller's choice.

  """
  if use_nodes:
    return utils.CommaJoin(nodes)
  else:
    return group
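# Example (hypothetical): _NodeEvacDest(True, "default", ["node2", "node3"])
# returns "node2, node3", while _NodeEvacDest(False, ...) returns "default".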
10304

    
10305

    
10306
def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
10307
  """Unpacks the result of change-group and node-evacuate iallocator requests.
10308

10309
  Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
10310
  L{constants.IALLOCATOR_MODE_CHG_GROUP}.
10311

10312
  @type lu: L{LogicalUnit}
10313
  @param lu: Logical unit instance
10314
  @type alloc_result: tuple/list
10315
  @param alloc_result: Result from iallocator
10316
  @type early_release: bool
10317
  @param early_release: Whether to release locks early if possible
10318
  @type use_nodes: bool
10319
  @param use_nodes: Whether to display node names instead of groups
10320

10321
  """
10322
  (moved, failed, jobs) = alloc_result
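  # "moved" holds (name, target_group, nodes) tuples, "failed" holds
  # (name, reason) pairs, and "jobs" is a list of job definitions, each a list
  # of serialized opcodes re-instantiated via opcodes.OpCode.LoadOpCode below.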
10323

    
10324
  if failed:
10325
    lu.LogWarning("Unable to evacuate instances %s",
10326
                  utils.CommaJoin("%s (%s)" % (name, reason)
10327
                                  for (name, reason) in failed))
10328

    
10329
  if moved:
10330
    lu.LogInfo("Instances to be moved: %s",
10331
               utils.CommaJoin("%s (to %s)" %
10332
                               (name, _NodeEvacDest(use_nodes, group, nodes))
10333
                               for (name, group, nodes) in moved))
10334

    
10335
  return [map(compat.partial(_SetOpEarlyRelease, early_release),
10336
              map(opcodes.OpCode.LoadOpCode, ops))
10337
          for ops in jobs]
10338

    
10339

    
10340
class LUInstanceGrowDisk(LogicalUnit):
10341
  """Grow a disk of an instance.
10342

10343
  """
10344
  HPATH = "disk-grow"
10345
  HTYPE = constants.HTYPE_INSTANCE
10346
  REQ_BGL = False
10347

    
10348
  def ExpandNames(self):
10349
    self._ExpandAndLockInstance()
10350
    self.needed_locks[locking.LEVEL_NODE] = []
10351
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
10352

    
10353
  def DeclareLocks(self, level):
10354
    if level == locking.LEVEL_NODE:
10355
      self._LockInstancesNodes()
10356

    
10357
  def BuildHooksEnv(self):
10358
    """Build hooks env.
10359

10360
    This runs on the master, the primary and all the secondaries.
10361

10362
    """
10363
    env = {
10364
      "DISK": self.op.disk,
10365
      "AMOUNT": self.op.amount,
10366
      }
10367
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
10368
    return env
10369

    
10370
  def BuildHooksNodes(self):
10371
    """Build hooks nodes.
10372

10373
    """
10374
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
10375
    return (nl, nl)
10376

    
10377
  def CheckPrereq(self):
10378
    """Check prerequisites.
10379

10380
    This checks that the instance is in the cluster.
10381

10382
    """
10383
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
10384
    assert instance is not None, \
10385
      "Cannot retrieve locked instance %s" % self.op.instance_name
10386
    nodenames = list(instance.all_nodes)
10387
    for node in nodenames:
10388
      _CheckNodeOnline(self, node)
10389

    
10390
    self.instance = instance
10391

    
10392
    if instance.disk_template not in constants.DTS_GROWABLE:
10393
      raise errors.OpPrereqError("Instance's disk layout does not support"
10394
                                 " growing", errors.ECODE_INVAL)
10395

    
10396
    self.disk = instance.FindDisk(self.op.disk)
10397

    
10398
    if instance.disk_template not in (constants.DT_FILE,
10399
                                      constants.DT_SHARED_FILE):
10400
      # TODO: check the free disk space for file, when that feature will be
10401
      # supported
10402
      _CheckNodesFreeDiskPerVG(self, nodenames,
10403
                               self.disk.ComputeGrowth(self.op.amount))
10404

    
10405
  def Exec(self, feedback_fn):
10406
    """Execute disk grow.
10407

10408
    """
10409
    instance = self.instance
10410
    disk = self.disk
10411

    
10412
    disks_ok, _ = _AssembleInstanceDisks(self, self.instance, disks=[disk])
10413
    if not disks_ok:
10414
      raise errors.OpExecError("Cannot activate block device to grow")
10415

    
10416
    # First run all grow ops in dry-run mode
10417
    for node in instance.all_nodes:
10418
      self.cfg.SetDiskID(disk, node)
10419
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, True)
10420
      result.Raise("Grow request failed to node %s" % node)
10421

    
10422
    # We know that (as far as we can test) operations across different
10423
    # nodes will succeed, time to run it for real
10424
    for node in instance.all_nodes:
10425
      self.cfg.SetDiskID(disk, node)
10426
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, False)
10427
      result.Raise("Grow request failed to node %s" % node)
10428

    
10429
      # TODO: Rewrite code to work properly
10430
      # DRBD goes into sync mode for a short amount of time after executing the
10431
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
10432
      # calling "resize" in sync mode fails. Sleeping for a short amount of
10433
      # time is a work-around.
10434
      time.sleep(5)
10435

    
10436
    disk.RecordGrow(self.op.amount)
10437
    self.cfg.Update(instance, feedback_fn)
10438
    if self.op.wait_for_sync:
10439
      disk_abort = not _WaitForSync(self, instance, disks=[disk])
10440
      if disk_abort:
10441
        self.proc.LogWarning("Disk sync-ing has not returned a good"
10442
                             " status; please check the instance")
10443
      if not instance.admin_up:
10444
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
10445
    elif not instance.admin_up:
10446
      self.proc.LogWarning("Not shutting down the disk even if the instance is"
10447
                           " not supposed to be running because no wait for"
10448
                           " sync mode was requested")
10449

    
10450

    
10451
class LUInstanceQueryData(NoHooksLU):
10452
  """Query runtime instance data.
10453

10454
  """
10455
  REQ_BGL = False
10456

    
10457
  def ExpandNames(self):
10458
    self.needed_locks = {}
10459

    
10460
    # Use locking if requested or when non-static information is wanted
10461
    if not (self.op.static or self.op.use_locking):
10462
      self.LogWarning("Non-static data requested, locks need to be acquired")
10463
      self.op.use_locking = True
10464

    
10465
    if self.op.instances or not self.op.use_locking:
10466
      # Expand instance names right here
10467
      self.wanted_names = _GetWantedInstances(self, self.op.instances)
10468
    else:
10469
      # Will use acquired locks
10470
      self.wanted_names = None
10471

    
10472
    if self.op.use_locking:
10473
      self.share_locks = _ShareAll()
10474

    
10475
      if self.wanted_names is None:
10476
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
10477
      else:
10478
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
10479

    
10480
      self.needed_locks[locking.LEVEL_NODE] = []
10481
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
10482

    
10483
  def DeclareLocks(self, level):
10484
    if self.op.use_locking and level == locking.LEVEL_NODE:
10485
      self._LockInstancesNodes()
10486

    
10487
  def CheckPrereq(self):
10488
    """Check prerequisites.
10489

10490
    This only checks the optional instance list against the existing names.
10491

10492
    """
10493
    if self.wanted_names is None:
10494
      assert self.op.use_locking, "Locking was not used"
10495
      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
10496

    
10497
    self.wanted_instances = \
10498
        map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
10499

    
10500
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
    """Returns the status of a block device.

    """
10504
    if self.op.static or not node:
10505
      return None
10506

    
10507
    self.cfg.SetDiskID(dev, node)
10508

    
10509
    result = self.rpc.call_blockdev_find(node, dev)
10510
    if result.offline:
10511
      return None
10512

    
10513
    result.Raise("Can't compute disk status for %s" % instance_name)
10514

    
10515
    status = result.payload
10516
    if status is None:
10517
      return None
10518

    
10519
    return (status.dev_path, status.major, status.minor,
10520
            status.sync_percent, status.estimated_time,
10521
            status.is_degraded, status.ldisk_status)
10522

    
10523
  def _ComputeDiskStatus(self, instance, snode, dev):
10524
    """Compute block device status.
10525

10526
    """
10527
    if dev.dev_type in constants.LDS_DRBD:
10528
      # we change the snode then (otherwise we use the one passed in)
10529
      if dev.logical_id[0] == instance.primary_node:
10530
        snode = dev.logical_id[1]
10531
      else:
10532
        snode = dev.logical_id[0]
10533

    
10534
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
10535
                                              instance.name, dev)
10536
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
10537

    
10538
    if dev.children:
10539
      dev_children = map(compat.partial(self._ComputeDiskStatus,
10540
                                        instance, snode),
10541
                         dev.children)
10542
    else:
10543
      dev_children = []
10544

    
10545
    return {
10546
      "iv_name": dev.iv_name,
10547
      "dev_type": dev.dev_type,
10548
      "logical_id": dev.logical_id,
10549
      "physical_id": dev.physical_id,
10550
      "pstatus": dev_pstatus,
10551
      "sstatus": dev_sstatus,
10552
      "children": dev_children,
10553
      "mode": dev.mode,
10554
      "size": dev.size,
10555
      }
10556

    
10557
  def Exec(self, feedback_fn):
10558
    """Gather and return data"""
10559
    result = {}
10560

    
10561
    cluster = self.cfg.GetClusterInfo()
10562

    
10563
    pri_nodes = self.cfg.GetMultiNodeInfo(i.primary_node
10564
                                          for i in self.wanted_instances)
10565
    for instance, (_, pnode) in zip(self.wanted_instances, pri_nodes):
10566
      if self.op.static or pnode.offline:
10567
        remote_state = None
10568
        if pnode.offline:
10569
          self.LogWarning("Primary node %s is marked offline, returning static"
10570
                          " information only for instance %s" %
10571
                          (pnode.name, instance.name))
10572
      else:
10573
        remote_info = self.rpc.call_instance_info(instance.primary_node,
10574
                                                  instance.name,
10575
                                                  instance.hypervisor)
10576
        remote_info.Raise("Error checking node %s" % instance.primary_node)
10577
        remote_info = remote_info.payload
10578
        if remote_info and "state" in remote_info:
10579
          remote_state = "up"
10580
        else:
10581
          remote_state = "down"
10582

    
10583
      if instance.admin_up:
10584
        config_state = "up"
10585
      else:
10586
        config_state = "down"
10587

    
10588
      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
10589
                  instance.disks)
10590

    
10591
      result[instance.name] = {
10592
        "name": instance.name,
10593
        "config_state": config_state,
10594
        "run_state": remote_state,
10595
        "pnode": instance.primary_node,
10596
        "snodes": instance.secondary_nodes,
10597
        "os": instance.os,
10598
        # this happens to be the same format used for hooks
10599
        "nics": _NICListToTuple(self, instance.nics),
10600
        "disk_template": instance.disk_template,
10601
        "disks": disks,
10602
        "hypervisor": instance.hypervisor,
10603
        "network_port": instance.network_port,
10604
        "hv_instance": instance.hvparams,
10605
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
10606
        "be_instance": instance.beparams,
10607
        "be_actual": cluster.FillBE(instance),
10608
        "os_instance": instance.osparams,
10609
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
10610
        "serial_no": instance.serial_no,
10611
        "mtime": instance.mtime,
10612
        "ctime": instance.ctime,
10613
        "uuid": instance.uuid,
10614
        }
10615

    
10616
    return result
10617

    
10618

    
10619
class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
10626

    
10627
  def CheckArguments(self):
10628
    if not (self.op.nics or self.op.disks or self.op.disk_template or
10629
            self.op.hvparams or self.op.beparams or self.op.os_name):
10630
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
10631

    
10632
    if self.op.hvparams:
10633
      _CheckGlobalHvParams(self.op.hvparams)
10634

    
10635
    # Disk validation
10636
    disk_addremove = 0
10637
    for disk_op, disk_dict in self.op.disks:
10638
      utils.ForceDictType(disk_dict, constants.IDISK_PARAMS_TYPES)
10639
      if disk_op == constants.DDM_REMOVE:
10640
        disk_addremove += 1
10641
        continue
10642
      elif disk_op == constants.DDM_ADD:
10643
        disk_addremove += 1
10644
      else:
10645
        if not isinstance(disk_op, int):
10646
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
10647
        if not isinstance(disk_dict, dict):
10648
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
10649
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
10650

    
10651
      if disk_op == constants.DDM_ADD:
10652
        mode = disk_dict.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
10653
        if mode not in constants.DISK_ACCESS_SET:
10654
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
10655
                                     errors.ECODE_INVAL)
10656
        size = disk_dict.get(constants.IDISK_SIZE, None)
10657
        if size is None:
10658
          raise errors.OpPrereqError("Required disk parameter size missing",
10659
                                     errors.ECODE_INVAL)
10660
        try:
10661
          size = int(size)
10662
        except (TypeError, ValueError), err:
10663
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
10664
                                     str(err), errors.ECODE_INVAL)
10665
        disk_dict[constants.IDISK_SIZE] = size
10666
      else:
10667
        # modification of disk
10668
        if constants.IDISK_SIZE in disk_dict:
10669
          raise errors.OpPrereqError("Disk size change not possible, use"
10670
                                     " grow-disk", errors.ECODE_INVAL)
10671

    
10672
    if disk_addremove > 1:
10673
      raise errors.OpPrereqError("Only one disk add or remove operation"
10674
                                 " supported at a time", errors.ECODE_INVAL)
10675

    
10676
    if self.op.disks and self.op.disk_template is not None:
10677
      raise errors.OpPrereqError("Disk template conversion and other disk"
10678
                                 " changes not supported at the same time",
10679
                                 errors.ECODE_INVAL)
10680

    
10681
    if (self.op.disk_template and
10682
        self.op.disk_template in constants.DTS_INT_MIRROR and
10683
        self.op.remote_node is None):
10684
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
10685
                                 " one requires specifying a secondary node",
10686
                                 errors.ECODE_INVAL)
10687

    
10688
    # NIC validation
10689
    nic_addremove = 0
10690
    for nic_op, nic_dict in self.op.nics:
10691
      utils.ForceDictType(nic_dict, constants.INIC_PARAMS_TYPES)
10692
      if nic_op == constants.DDM_REMOVE:
10693
        nic_addremove += 1
10694
        continue
10695
      elif nic_op == constants.DDM_ADD:
10696
        nic_addremove += 1
10697
      else:
10698
        if not isinstance(nic_op, int):
10699
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
10700
        if not isinstance(nic_dict, dict):
10701
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
10702
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
10703

    
10704
      # nic_dict should be a dict
10705
      nic_ip = nic_dict.get(constants.INIC_IP, None)
10706
      if nic_ip is not None:
10707
        if nic_ip.lower() == constants.VALUE_NONE:
10708
          nic_dict[constants.INIC_IP] = None
10709
        else:
10710
          if not netutils.IPAddress.IsValid(nic_ip):
10711
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
10712
                                       errors.ECODE_INVAL)
10713

    
10714
      nic_bridge = nic_dict.get("bridge", None)
10715
      nic_link = nic_dict.get(constants.INIC_LINK, None)
10716
      if nic_bridge and nic_link:
10717
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
10718
                                   " at the same time", errors.ECODE_INVAL)
10719
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
10720
        nic_dict["bridge"] = None
10721
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
10722
        nic_dict[constants.INIC_LINK] = None
10723

    
10724
      if nic_op == constants.DDM_ADD:
10725
        nic_mac = nic_dict.get(constants.INIC_MAC, None)
10726
        if nic_mac is None:
10727
          nic_dict[constants.INIC_MAC] = constants.VALUE_AUTO
10728

    
10729
      if constants.INIC_MAC in nic_dict:
10730
        nic_mac = nic_dict[constants.INIC_MAC]
10731
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
10732
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
10733

    
10734
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
10735
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
10736
                                     " modifying an existing nic",
10737
                                     errors.ECODE_INVAL)
10738

    
10739
    if nic_addremove > 1:
10740
      raise errors.OpPrereqError("Only one NIC add or remove operation"
10741
                                 " supported at a time", errors.ECODE_INVAL)
10742

    
10743
  def ExpandNames(self):
10744
    self._ExpandAndLockInstance()
10745
    self.needed_locks[locking.LEVEL_NODE] = []
10746
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
10747

    
10748
  def DeclareLocks(self, level):
10749
    if level == locking.LEVEL_NODE:
10750
      self._LockInstancesNodes()
10751
      if self.op.disk_template and self.op.remote_node:
10752
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
10753
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
10754

    
10755
  def BuildHooksEnv(self):
10756
    """Build hooks env.
10757

10758
    This runs on the master, primary and secondaries.
10759

10760
    """
10761
    args = dict()
10762
    if constants.BE_MEMORY in self.be_new:
10763
      args["memory"] = self.be_new[constants.BE_MEMORY]
10764
    if constants.BE_VCPUS in self.be_new:
10765
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
10766
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
10767
    # information at all.
10768
    if self.op.nics:
10769
      args["nics"] = []
10770
      nic_override = dict(self.op.nics)
10771
      for idx, nic in enumerate(self.instance.nics):
10772
        if idx in nic_override:
10773
          this_nic_override = nic_override[idx]
10774
        else:
10775
          this_nic_override = {}
10776
        if constants.INIC_IP in this_nic_override:
10777
          ip = this_nic_override[constants.INIC_IP]
10778
        else:
10779
          ip = nic.ip
10780
        if constants.INIC_MAC in this_nic_override:
10781
          mac = this_nic_override[constants.INIC_MAC]
10782
        else:
10783
          mac = nic.mac
10784
        if idx in self.nic_pnew:
10785
          nicparams = self.nic_pnew[idx]
10786
        else:
10787
          nicparams = self.cluster.SimpleFillNIC(nic.nicparams)
10788
        mode = nicparams[constants.NIC_MODE]
10789
        link = nicparams[constants.NIC_LINK]
10790
        args["nics"].append((ip, mac, mode, link))
10791
      if constants.DDM_ADD in nic_override:
10792
        ip = nic_override[constants.DDM_ADD].get(constants.INIC_IP, None)
10793
        mac = nic_override[constants.DDM_ADD][constants.INIC_MAC]
10794
        nicparams = self.nic_pnew[constants.DDM_ADD]
10795
        mode = nicparams[constants.NIC_MODE]
10796
        link = nicparams[constants.NIC_LINK]
10797
        args["nics"].append((ip, mac, mode, link))
10798
      elif constants.DDM_REMOVE in nic_override:
10799
        del args["nics"][-1]
10800

    
10801
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
10802
    if self.op.disk_template:
10803
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
10804

    
10805
    return env
10806

    
10807
  def BuildHooksNodes(self):
10808
    """Build hooks nodes.
10809

10810
    """
10811
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
10812
    return (nl, nl)
10813

    
10814
  def CheckPrereq(self):
10815
    """Check prerequisites.
10816

10817
    This only checks the instance list against the existing names.
10818

10819
    """
10820
    # checking the new params on the primary/secondary nodes
10821

    
10822
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
10823
    cluster = self.cluster = self.cfg.GetClusterInfo()
10824
    assert self.instance is not None, \
10825
      "Cannot retrieve locked instance %s" % self.op.instance_name
10826
    pnode = instance.primary_node
10827
    nodelist = list(instance.all_nodes)
10828

    
10829
    # OS change
10830
    if self.op.os_name and not self.op.force:
10831
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
10832
                      self.op.force_variant)
10833
      instance_os = self.op.os_name
10834
    else:
10835
      instance_os = instance.os
10836

    
10837
    if self.op.disk_template:
10838
      if instance.disk_template == self.op.disk_template:
10839
        raise errors.OpPrereqError("Instance already has disk template %s" %
10840
                                   instance.disk_template, errors.ECODE_INVAL)
10841

    
10842
      if (instance.disk_template,
10843
          self.op.disk_template) not in self._DISK_CONVERSIONS:
10844
        raise errors.OpPrereqError("Unsupported disk template conversion from"
10845
                                   " %s to %s" % (instance.disk_template,
10846
                                                  self.op.disk_template),
10847
                                   errors.ECODE_INVAL)
10848
      _CheckInstanceDown(self, instance, "cannot change disk template")
10849
      if self.op.disk_template in constants.DTS_INT_MIRROR:
10850
        if self.op.remote_node == pnode:
10851
          raise errors.OpPrereqError("Given new secondary node %s is the same"
10852
                                     " as the primary node of the instance" %
10853
                                     self.op.remote_node, errors.ECODE_STATE)
10854
        _CheckNodeOnline(self, self.op.remote_node)
10855
        _CheckNodeNotDrained(self, self.op.remote_node)
10856
        # FIXME: here we assume that the old disk template is DT_PLAIN
10857
        assert instance.disk_template == constants.DT_PLAIN
10858
        disks = [{constants.IDISK_SIZE: d.size,
10859
                  constants.IDISK_VG: d.logical_id[0]}
10860
                 for d in instance.disks]
10861
        required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
10862
        _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
10863

    
10864
    # hvparams processing
10865
    if self.op.hvparams:
10866
      hv_type = instance.hypervisor
10867
      i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
10868
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
10869
      hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
10870

    
10871
      # local check
10872
      hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
10873
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
10874
      self.hv_proposed = self.hv_new = hv_new # the new actual values
10875
      self.hv_inst = i_hvdict # the new dict (without defaults)
10876
    else:
10877
      self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
10878
                                              instance.hvparams)
10879
      self.hv_new = self.hv_inst = {}
10880

    
10881
    # beparams processing
10882
    if self.op.beparams:
10883
      i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
10884
                                   use_none=True)
10885
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
10886
      be_new = cluster.SimpleFillBE(i_bedict)
10887
      self.be_proposed = self.be_new = be_new # the new actual values
10888
      self.be_inst = i_bedict # the new dict (without defaults)
10889
    else:
10890
      self.be_new = self.be_inst = {}
10891
      self.be_proposed = cluster.SimpleFillBE(instance.beparams)
10892
    be_old = cluster.FillBE(instance)
10893

    
10894
    # CPU param validation -- checking every time a parameter is
10895
    # changed to cover all cases where either CPU mask or vcpus have
10896
    # changed
10897
    if (constants.BE_VCPUS in self.be_proposed and
10898
        constants.HV_CPU_MASK in self.hv_proposed):
10899
      cpu_list = \
10900
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
10901
      # Verify mask is consistent with number of vCPUs. Can skip this
10902
      # test if only 1 entry in the CPU mask, which means same mask
10903
      # is applied to all vCPUs.
10904
      if (len(cpu_list) > 1 and
10905
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
10906
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
10907
                                   " CPU mask [%s]" %
10908
                                   (self.be_proposed[constants.BE_VCPUS],
10909
                                    self.hv_proposed[constants.HV_CPU_MASK]),
10910
                                   errors.ECODE_INVAL)
10911

    
10912
      # Only perform this test if a new CPU mask is given
10913
      if constants.HV_CPU_MASK in self.hv_new:
10914
        # Calculate the largest CPU number requested
10915
        max_requested_cpu = max(map(max, cpu_list))
10916
        # Check that all of the instance's nodes have enough physical CPUs to
10917
        # satisfy the requested CPU mask
10918
        _CheckNodesPhysicalCPUs(self, instance.all_nodes,
10919
                                max_requested_cpu + 1, instance.hypervisor)
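        # cpu_list holds one sub-list of physical CPU ids per vCPU, so the
        # largest id referenced by the mask determines the minimum physical
        # CPU count: for illustration, a parsed mask of [[0, 1], [2], [3]]
        # references CPUs 0..3 and therefore requires at least 4 physical
        # CPUs on every node the instance can run on.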
10920

    
10921
    # osparams processing
10922
    if self.op.osparams:
10923
      i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
10924
      _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
10925
      self.os_inst = i_osdict # the new dict (without defaults)
10926
    else:
10927
      self.os_inst = {}
10928

    
10929
    self.warn = []
10930

    
10931
    if (constants.BE_MEMORY in self.op.beparams and not self.op.force and
10932
        be_new[constants.BE_MEMORY] > be_old[constants.BE_MEMORY]):
10933
      mem_check_list = [pnode]
10934
      if be_new[constants.BE_AUTO_BALANCE]:
10935
        # either we changed auto_balance to yes or it was from before
10936
        mem_check_list.extend(instance.secondary_nodes)
10937
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
10938
                                                  instance.hypervisor)
10939
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
10940
                                         instance.hypervisor)
10941
      pninfo = nodeinfo[pnode]
10942
      msg = pninfo.fail_msg
10943
      if msg:
10944
        # Assume the primary node is unreachable and go ahead
10945
        self.warn.append("Can't get info from primary node %s: %s" %
10946
                         (pnode, msg))
10947
      elif not isinstance(pninfo.payload.get("memory_free", None), int):
10948
        self.warn.append("Node data from primary node %s doesn't contain"
10949
                         " free memory information" % pnode)
10950
      elif instance_info.fail_msg:
10951
        self.warn.append("Can't get instance runtime information: %s" %
10952
                        instance_info.fail_msg)
10953
      else:
10954
        if instance_info.payload:
10955
          current_mem = int(instance_info.payload["memory"])
10956
        else:
10957
          # Assume instance not running
10958
          # (there is a slight race condition here, but it's not very probable,
10959
          # and we have no other way to check)
10960
          current_mem = 0
10961
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
10962
                    pninfo.payload["memory_free"])
10963
        if miss_mem > 0:
10964
          raise errors.OpPrereqError("This change will prevent the instance"
10965
                                     " from starting, due to %d MB of memory"
10966
                                     " missing on its primary node" % miss_mem,
10967
                                     errors.ECODE_NORES)
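        # miss_mem is the shortfall in MB: the requested BE_MEMORY minus what
        # the instance already uses minus the primary node's free memory. For
        # illustration, raising memory to 4096 MB while the instance uses
        # 1024 MB and the node reports 2048 MB free leaves
        # 4096 - 1024 - 2048 = 1024 MB missing, so the change is refused.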
10968

    
10969
      if be_new[constants.BE_AUTO_BALANCE]:
10970
        for node, nres in nodeinfo.items():
10971
          if node not in instance.secondary_nodes:
10972
            continue
10973
          nres.Raise("Can't get info from secondary node %s" % node,
10974
                     prereq=True, ecode=errors.ECODE_STATE)
10975
          if not isinstance(nres.payload.get("memory_free", None), int):
10976
            raise errors.OpPrereqError("Secondary node %s didn't return free"
10977
                                       " memory information" % node,
10978
                                       errors.ECODE_STATE)
10979
          elif be_new[constants.BE_MEMORY] > nres.payload["memory_free"]:
10980
            raise errors.OpPrereqError("This change will prevent the instance"
10981
                                       " from failover to its secondary node"
10982
                                       " %s, due to not enough memory" % node,
10983
                                       errors.ECODE_STATE)
10984

    
10985
    # NIC processing
10986
    self.nic_pnew = {}
10987
    self.nic_pinst = {}
10988
    for nic_op, nic_dict in self.op.nics:
10989
      if nic_op == constants.DDM_REMOVE:
10990
        if not instance.nics:
10991
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
10992
                                     errors.ECODE_INVAL)
10993
        continue
10994
      if nic_op != constants.DDM_ADD:
10995
        # an existing nic
10996
        if not instance.nics:
10997
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
10998
                                     " no NICs" % nic_op,
10999
                                     errors.ECODE_INVAL)
11000
        if nic_op < 0 or nic_op >= len(instance.nics):
11001
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
11002
                                     " are 0 to %d" %
11003
                                     (nic_op, len(instance.nics) - 1),
11004
                                     errors.ECODE_INVAL)
11005
        old_nic_params = instance.nics[nic_op].nicparams
11006
        old_nic_ip = instance.nics[nic_op].ip
11007
      else:
11008
        old_nic_params = {}
11009
        old_nic_ip = None
11010

    
11011
      update_params_dict = dict([(key, nic_dict[key])
11012
                                 for key in constants.NICS_PARAMETERS
11013
                                 if key in nic_dict])
11014

    
11015
      if "bridge" in nic_dict:
11016
        update_params_dict[constants.NIC_LINK] = nic_dict["bridge"]
11017

    
11018
      new_nic_params = _GetUpdatedParams(old_nic_params,
11019
                                         update_params_dict)
11020
      utils.ForceDictType(new_nic_params, constants.NICS_PARAMETER_TYPES)
11021
      new_filled_nic_params = cluster.SimpleFillNIC(new_nic_params)
11022
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
11023
      self.nic_pinst[nic_op] = new_nic_params
11024
      self.nic_pnew[nic_op] = new_filled_nic_params
11025
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
11026

    
11027
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
11028
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
11029
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
11030
        if msg:
11031
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
11032
          if self.op.force:
11033
            self.warn.append(msg)
11034
          else:
11035
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
11036
      if new_nic_mode == constants.NIC_MODE_ROUTED:
11037
        if constants.INIC_IP in nic_dict:
11038
          nic_ip = nic_dict[constants.INIC_IP]
11039
        else:
11040
          nic_ip = old_nic_ip
11041
        if nic_ip is None:
11042
          raise errors.OpPrereqError("Cannot set the nic ip to None"
11043
                                     " on a routed nic", errors.ECODE_INVAL)
11044
      if constants.INIC_MAC in nic_dict:
11045
        nic_mac = nic_dict[constants.INIC_MAC]
11046
        if nic_mac is None:
11047
          raise errors.OpPrereqError("Cannot set the nic mac to None",
11048
                                     errors.ECODE_INVAL)
11049
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
11050
          # otherwise generate the mac
11051
          nic_dict[constants.INIC_MAC] = \
11052
            self.cfg.GenerateMAC(self.proc.GetECId())
11053
        else:
11054
          # or validate/reserve the current one
11055
          try:
11056
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
11057
          except errors.ReservationError:
11058
            raise errors.OpPrereqError("MAC address %s already in use"
11059
                                       " in cluster" % nic_mac,
11060
                                       errors.ECODE_NOTUNIQUE)
11061

    
11062
    # DISK processing
11063
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
11064
      raise errors.OpPrereqError("Disk operations not supported for"
11065
                                 " diskless instances",
11066
                                 errors.ECODE_INVAL)
11067
    for disk_op, _ in self.op.disks:
11068
      if disk_op == constants.DDM_REMOVE:
11069
        if len(instance.disks) == 1:
11070
          raise errors.OpPrereqError("Cannot remove the last disk of"
11071
                                     " an instance", errors.ECODE_INVAL)
11072
        _CheckInstanceDown(self, instance, "cannot remove disks")
11073

    
11074
      if (disk_op == constants.DDM_ADD and
11075
          len(instance.disks) >= constants.MAX_DISKS):
11076
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
11077
                                   " add more" % constants.MAX_DISKS,
11078
                                   errors.ECODE_STATE)
11079
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
11080
        # an existing disk
11081
        if disk_op < 0 or disk_op >= len(instance.disks):
11082
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
11083
                                     " are 0 to %d" %
11084
                                     (disk_op, len(instance.disks) - 1),
11085
                                     errors.ECODE_INVAL)
11086

    
11087
    return
11088

    
11089
  def _ConvertPlainToDrbd(self, feedback_fn):
11090
    """Converts an instance from plain to drbd.
11091

11092
    """
11093
    feedback_fn("Converting template to drbd")
11094
    instance = self.instance
11095
    pnode = instance.primary_node
11096
    snode = self.op.remote_node
11097

    
11098
    # create a fake disk info for _GenerateDiskTemplate
11099
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
11100
                  constants.IDISK_VG: d.logical_id[0]}
11101
                 for d in instance.disks]
11102
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
11103
                                      instance.name, pnode, [snode],
11104
                                      disk_info, None, None, 0, feedback_fn)
11105
    info = _GetInstanceInfoText(instance)
11106
    feedback_fn("Creating aditional volumes...")
11107
    # first, create the missing data and meta devices
11108
    for disk in new_disks:
11109
      # unfortunately this is... not too nice
11110
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
11111
                            info, True)
11112
      for child in disk.children:
11113
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
11114
    # at this stage, all new LVs have been created, we can rename the
11115
    # old ones
11116
    feedback_fn("Renaming original volumes...")
11117
    rename_list = [(o, n.children[0].logical_id)
11118
                   for (o, n) in zip(instance.disks, new_disks)]
11119
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
11120
    result.Raise("Failed to rename original LVs")
11121

    
11122
    feedback_fn("Initializing DRBD devices...")
11123
    # all child devices are in place, we can now create the DRBD devices
11124
    for disk in new_disks:
11125
      for node in [pnode, snode]:
11126
        f_create = node == pnode
11127
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
11128

    
11129
    # at this point, the instance has been modified
11130
    instance.disk_template = constants.DT_DRBD8
11131
    instance.disks = new_disks
11132
    self.cfg.Update(instance, feedback_fn)
11133

    
11134
    # disks are created, waiting for sync
11135
    disk_abort = not _WaitForSync(self, instance,
11136
                                  oneshot=not self.op.wait_for_sync)
11137
    if disk_abort:
11138
      raise errors.OpExecError("There are some degraded disks for"
11139
                               " this instance, please cleanup manually")
11140

    
11141
  def _ConvertDrbdToPlain(self, feedback_fn):
11142
    """Converts an instance from drbd to plain.
11143

11144
    """
11145
    instance = self.instance
11146
    assert len(instance.secondary_nodes) == 1
11147
    pnode = instance.primary_node
11148
    snode = instance.secondary_nodes[0]
11149
    feedback_fn("Converting template to plain")
11150

    
11151
    old_disks = instance.disks
11152
    new_disks = [d.children[0] for d in old_disks]
11153

    
11154
    # copy over size and mode
11155
    for parent, child in zip(old_disks, new_disks):
11156
      child.size = parent.size
11157
      child.mode = parent.mode
11158

    
11159
    # update instance structure
11160
    instance.disks = new_disks
11161
    instance.disk_template = constants.DT_PLAIN
11162
    self.cfg.Update(instance, feedback_fn)
11163

    
11164
    feedback_fn("Removing volumes on the secondary node...")
11165
    for disk in old_disks:
11166
      self.cfg.SetDiskID(disk, snode)
11167
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
11168
      if msg:
11169
        self.LogWarning("Could not remove block device %s on node %s,"
11170
                        " continuing anyway: %s", disk.iv_name, snode, msg)
11171

    
11172
    feedback_fn("Removing unneeded volumes on the primary node...")
11173
    for idx, disk in enumerate(old_disks):
11174
      meta = disk.children[1]
11175
      self.cfg.SetDiskID(meta, pnode)
11176
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
11177
      if msg:
11178
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
11179
                        " continuing anyway: %s", idx, pnode, msg)
11180

    
11181
  def Exec(self, feedback_fn):
11182
    """Modifies an instance.
11183

11184
    All parameters take effect only at the next restart of the instance.
11185

11186
    """
11187
    # Process here the warnings from CheckPrereq, as we don't have a
11188
    # feedback_fn there.
11189
    for warn in self.warn:
11190
      feedback_fn("WARNING: %s" % warn)
11191

    
11192
    result = []
11193
    instance = self.instance
11194
    # disk changes
11195
    for disk_op, disk_dict in self.op.disks:
11196
      if disk_op == constants.DDM_REMOVE:
11197
        # remove the last disk
11198
        device = instance.disks.pop()
11199
        device_idx = len(instance.disks)
11200
        for node, disk in device.ComputeNodeTree(instance.primary_node):
11201
          self.cfg.SetDiskID(disk, node)
11202
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
11203
          if msg:
11204
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
11205
                            " continuing anyway", device_idx, node, msg)
11206
        result.append(("disk/%d" % device_idx, "remove"))
11207
      elif disk_op == constants.DDM_ADD:
11208
        # add a new disk
11209
        if instance.disk_template in (constants.DT_FILE,
11210
                                        constants.DT_SHARED_FILE):
11211
          file_driver, file_path = instance.disks[0].logical_id
11212
          file_path = os.path.dirname(file_path)
11213
        else:
11214
          file_driver = file_path = None
11215
        disk_idx_base = len(instance.disks)
11216
        new_disk = _GenerateDiskTemplate(self,
11217
                                         instance.disk_template,
11218
                                         instance.name, instance.primary_node,
11219
                                         instance.secondary_nodes,
11220
                                         [disk_dict],
11221
                                         file_path,
11222
                                         file_driver,
11223
                                         disk_idx_base, feedback_fn)[0]
11224
        instance.disks.append(new_disk)
11225
        info = _GetInstanceInfoText(instance)
11226

    
11227
        logging.info("Creating volume %s for instance %s",
11228
                     new_disk.iv_name, instance.name)
11229
        # Note: this needs to be kept in sync with _CreateDisks
11230
        #HARDCODE
11231
        for node in instance.all_nodes:
11232
          f_create = node == instance.primary_node
11233
          try:
11234
            _CreateBlockDev(self, node, instance, new_disk,
11235
                            f_create, info, f_create)
11236
          except errors.OpExecError, err:
11237
            self.LogWarning("Failed to create volume %s (%s) on"
11238
                            " node %s: %s",
11239
                            new_disk.iv_name, new_disk, node, err)
11240
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
11241
                       (new_disk.size, new_disk.mode)))
11242
      else:
11243
        # change a given disk
11244
        instance.disks[disk_op].mode = disk_dict[constants.IDISK_MODE]
11245
        result.append(("disk.mode/%d" % disk_op,
11246
                       disk_dict[constants.IDISK_MODE]))
11247

    
11248
    if self.op.disk_template:
11249
      r_shut = _ShutdownInstanceDisks(self, instance)
11250
      if not r_shut:
11251
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
11252
                                 " proceed with disk template conversion")
11253
      mode = (instance.disk_template, self.op.disk_template)
11254
      try:
11255
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
11256
      except:
11257
        self.cfg.ReleaseDRBDMinors(instance.name)
11258
        raise
11259
      result.append(("disk_template", self.op.disk_template))
11260

    
11261
    # NIC changes
11262
    for nic_op, nic_dict in self.op.nics:
11263
      if nic_op == constants.DDM_REMOVE:
11264
        # remove the last nic
11265
        del instance.nics[-1]
11266
        result.append(("nic.%d" % len(instance.nics), "remove"))
11267
      elif nic_op == constants.DDM_ADD:
11268
        # mac and bridge should be set by now
11269
        mac = nic_dict[constants.INIC_MAC]
11270
        ip = nic_dict.get(constants.INIC_IP, None)
11271
        nicparams = self.nic_pinst[constants.DDM_ADD]
11272
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
11273
        instance.nics.append(new_nic)
11274
        result.append(("nic.%d" % (len(instance.nics) - 1),
11275
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
11276
                       (new_nic.mac, new_nic.ip,
11277
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
11278
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
11279
                       )))
11280
      else:
11281
        for key in (constants.INIC_MAC, constants.INIC_IP):
11282
          if key in nic_dict:
11283
            setattr(instance.nics[nic_op], key, nic_dict[key])
11284
        if nic_op in self.nic_pinst:
11285
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
11286
        for key, val in nic_dict.iteritems():
11287
          result.append(("nic.%s/%d" % (key, nic_op), val))
11288

    
11289
    # hvparams changes
11290
    if self.op.hvparams:
11291
      instance.hvparams = self.hv_inst
11292
      for key, val in self.op.hvparams.iteritems():
11293
        result.append(("hv/%s" % key, val))
11294

    
11295
    # beparams changes
11296
    if self.op.beparams:
11297
      instance.beparams = self.be_inst
11298
      for key, val in self.op.beparams.iteritems():
11299
        result.append(("be/%s" % key, val))
11300

    
11301
    # OS change
11302
    if self.op.os_name:
11303
      instance.os = self.op.os_name
11304

    
11305
    # osparams changes
11306
    if self.op.osparams:
11307
      instance.osparams = self.os_inst
11308
      for key, val in self.op.osparams.iteritems():
11309
        result.append(("os/%s" % key, val))
11310

    
11311
    self.cfg.Update(instance, feedback_fn)
11312

    
11313
    return result
11314

    
11315
  _DISK_CONVERSIONS = {
11316
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
11317
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
11318
    }
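  # This table drives Exec() above: the (current template, requested template)
  # pair is looked up here and the resulting function is invoked as
  # fn(self, feedback_fn); CheckPrereq rejects any pair that is not a key in
  # this dictionary.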
11319

    
11320

    
11321
class LUInstanceChangeGroup(LogicalUnit):
11322
  HPATH = "instance-change-group"
11323
  HTYPE = constants.HTYPE_INSTANCE
11324
  REQ_BGL = False
11325

    
11326
  def ExpandNames(self):
11327
    self.share_locks = _ShareAll()
11328
    self.needed_locks = {
11329
      locking.LEVEL_NODEGROUP: [],
11330
      locking.LEVEL_NODE: [],
11331
      }
11332

    
11333
    self._ExpandAndLockInstance()
11334

    
11335
    if self.op.target_groups:
11336
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
11337
                                  self.op.target_groups)
11338
    else:
11339
      self.req_target_uuids = None
11340

    
11341
    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
11342

    
11343
  def DeclareLocks(self, level):
11344
    if level == locking.LEVEL_NODEGROUP:
11345
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
11346

    
11347
      if self.req_target_uuids:
11348
        lock_groups = set(self.req_target_uuids)
11349

    
11350
        # Lock all groups used by instance optimistically; this requires going
11351
        # via the node before it's locked, requiring verification later on
11352
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
11353
        lock_groups.update(instance_groups)
11354
      else:
11355
        # No target groups, need to lock all of them
11356
        lock_groups = locking.ALL_SET
11357

    
11358
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
11359

    
11360
    elif level == locking.LEVEL_NODE:
11361
      if self.req_target_uuids:
11362
        # Lock all nodes used by instances
11363
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
11364
        self._LockInstancesNodes()
11365

    
11366
        # Lock all nodes in all potential target groups
11367
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
11368
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
11369
        member_nodes = [node_name
11370
                        for group in lock_groups
11371
                        for node_name in self.cfg.GetNodeGroup(group).members]
11372
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
11373
      else:
11374
        # Lock all nodes as all groups are potential targets
11375
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
11376

    
11377
  def CheckPrereq(self):
11378
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
11379
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
11380
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
11381

    
11382
    assert (self.req_target_uuids is None or
11383
            owned_groups.issuperset(self.req_target_uuids))
11384
    assert owned_instances == set([self.op.instance_name])
11385

    
11386
    # Get instance information
11387
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
11388

    
11389
    # Check if node groups for locked instance are still correct
11390
    assert owned_nodes.issuperset(self.instance.all_nodes), \
11391
      ("Instance %s's nodes changed while we kept the lock" %
11392
       self.op.instance_name)
11393

    
11394
    inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
11395
                                           owned_groups)
11396

    
11397
    if self.req_target_uuids:
11398
      # User requested specific target groups
11399
      self.target_uuids = self.req_target_uuids
11400
    else:
11401
      # All groups except those used by the instance are potential targets
11402
      self.target_uuids = owned_groups - inst_groups
11403

    
11404
    conflicting_groups = self.target_uuids & inst_groups
11405
    if conflicting_groups:
11406
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
11407
                                 " used by the instance '%s'" %
11408
                                 (utils.CommaJoin(conflicting_groups),
11409
                                  self.op.instance_name),
11410
                                 errors.ECODE_INVAL)
11411

    
11412
    if not self.target_uuids:
11413
      raise errors.OpPrereqError("There are no possible target groups",
11414
                                 errors.ECODE_INVAL)
11415

    
11416
  def BuildHooksEnv(self):
11417
    """Build hooks env.
11418

11419
    """
11420
    assert self.target_uuids
11421

    
11422
    env = {
11423
      "TARGET_GROUPS": " ".join(self.target_uuids),
11424
      }
11425

    
11426
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
11427

    
11428
    return env
11429

    
11430
  def BuildHooksNodes(self):
11431
    """Build hooks nodes.
11432

11433
    """
11434
    mn = self.cfg.GetMasterNode()
11435
    return ([mn], [mn])
11436

    
11437
  def Exec(self, feedback_fn):
11438
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
11439

    
11440
    assert instances == [self.op.instance_name], "Instance not locked"
11441

    
11442
    ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
11443
                     instances=instances, target_groups=list(self.target_uuids))
11444

    
11445
    ial.Run(self.op.iallocator)
11446

    
11447
    if not ial.success:
11448
      raise errors.OpPrereqError("Can't compute solution for changing group of"
11449
                                 " instance '%s' using iallocator '%s': %s" %
11450
                                 (self.op.instance_name, self.op.iallocator,
11451
                                  ial.info),
11452
                                 errors.ECODE_NORES)
11453

    
11454
    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
11455

    
11456
    self.LogInfo("Iallocator returned %s job(s) for changing group of"
11457
                 " instance '%s'", len(jobs), self.op.instance_name)
11458

    
11459
    return ResultWithJobs(jobs)
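    # Note that this LU does not move the instance itself: it only asks the
    # iallocator (CHG_GROUP mode) for a plan, turns the answer into job
    # definitions via _LoadNodeEvacResult and returns them wrapped in
    # ResultWithJobs for submission as separate follow-up jobs.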
11460

    
11461

    
11462
class LUBackupQuery(NoHooksLU):
11463
  """Query the exports list
11464

11465
  """
11466
  REQ_BGL = False
11467

    
11468
  def ExpandNames(self):
11469
    self.needed_locks = {}
11470
    self.share_locks[locking.LEVEL_NODE] = 1
11471
    if not self.op.nodes:
11472
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
11473
    else:
11474
      self.needed_locks[locking.LEVEL_NODE] = \
11475
        _GetWantedNodes(self, self.op.nodes)
11476

    
11477
  def Exec(self, feedback_fn):
11478
    """Compute the list of all the exported system images.
11479

11480
    @rtype: dict
11481
    @return: a dictionary with the structure node->(export-list)
11482
        where export-list is a list of the instances exported on
11483
        that node.
11484

11485
    """
11486
    self.nodes = self.owned_locks(locking.LEVEL_NODE)
11487
    rpcresult = self.rpc.call_export_list(self.nodes)
11488
    result = {}
11489
    for node in rpcresult:
11490
      if rpcresult[node].fail_msg:
11491
        result[node] = False
11492
      else:
11493
        result[node] = rpcresult[node].payload
11494

    
11495
    return result
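    # Nodes whose export list could not be retrieved are mapped to False
    # rather than a list, so callers must handle both shapes; for illustration
    # the result may look like {"node1.example.com": ["inst1.example.com"],
    # "node2.example.com": False} (names made up).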
11496

    
11497

    
11498
class LUBackupPrepare(NoHooksLU):
11499
  """Prepares an instance for an export and returns useful information.
11500

11501
  """
11502
  REQ_BGL = False
11503

    
11504
  def ExpandNames(self):
11505
    self._ExpandAndLockInstance()
11506

    
11507
  def CheckPrereq(self):
11508
    """Check prerequisites.
11509

11510
    """
11511
    instance_name = self.op.instance_name
11512

    
11513
    self.instance = self.cfg.GetInstanceInfo(instance_name)
11514
    assert self.instance is not None, \
11515
          "Cannot retrieve locked instance %s" % self.op.instance_name
11516
    _CheckNodeOnline(self, self.instance.primary_node)
11517

    
11518
    self._cds = _GetClusterDomainSecret()
11519

    
11520
  def Exec(self, feedback_fn):
11521
    """Prepares an instance for an export.
11522

11523
    """
11524
    instance = self.instance
11525

    
11526
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
11527
      salt = utils.GenerateSecret(8)
11528

    
11529
      feedback_fn("Generating X509 certificate on %s" % instance.primary_node)
11530
      result = self.rpc.call_x509_cert_create(instance.primary_node,
11531
                                              constants.RIE_CERT_VALIDITY)
11532
      result.Raise("Can't create X509 key and certificate on %s" % result.node)
11533

    
11534
      (name, cert_pem) = result.payload
11535

    
11536
      cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
11537
                                             cert_pem)
11538

    
11539
      return {
11540
        "handshake": masterd.instance.ComputeRemoteExportHandshake(self._cds),
11541
        "x509_key_name": (name, utils.Sha1Hmac(self._cds, name, salt=salt),
11542
                          salt),
11543
        "x509_ca": utils.SignX509Certificate(cert, self._cds, salt),
11544
        }
11545

    
11546
    return None
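    # For remote exports the dictionary above carries the handshake value, the
    # X509 key name as a (name, HMAC, salt) triple keyed on the cluster domain
    # secret, and the signed CA certificate; LUBackupExport.CheckPrereq later
    # re-verifies that HMAC with utils.VerifySha1Hmac before trusting the CA.
    # Local exports need none of this, hence the plain None return.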
11547

    
11548

    
11549
class LUBackupExport(LogicalUnit):
11550
  """Export an instance to an image in the cluster.
11551

11552
  """
11553
  HPATH = "instance-export"
11554
  HTYPE = constants.HTYPE_INSTANCE
11555
  REQ_BGL = False
11556

    
11557
  def CheckArguments(self):
11558
    """Check the arguments.
11559

11560
    """
11561
    self.x509_key_name = self.op.x509_key_name
11562
    self.dest_x509_ca_pem = self.op.destination_x509_ca
11563

    
11564
    if self.op.mode == constants.EXPORT_MODE_REMOTE:
11565
      if not self.x509_key_name:
11566
        raise errors.OpPrereqError("Missing X509 key name for encryption",
11567
                                   errors.ECODE_INVAL)
11568

    
11569
      if not self.dest_x509_ca_pem:
11570
        raise errors.OpPrereqError("Missing destination X509 CA",
11571
                                   errors.ECODE_INVAL)
11572

    
11573
  def ExpandNames(self):
11574
    self._ExpandAndLockInstance()
11575

    
11576
    # Lock all nodes for local exports
11577
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
11578
      # FIXME: lock only instance primary and destination node
11579
      #
11580
      # Sad but true, for now we have to lock all nodes, as we don't know where
11581
      # the previous export might be, and in this LU we search for it and
11582
      # remove it from its current node. In the future we could fix this by:
11583
      #  - making a tasklet to search (share-lock all), then create the
11584
      #    new one, then one to remove, after
11585
      #  - removing the removal operation altogether
11586
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
11587

    
11588
  def DeclareLocks(self, level):
11589
    """Last minute lock declaration."""
11590
    # All nodes are locked anyway, so nothing to do here.
11591

    
11592
  def BuildHooksEnv(self):
11593
    """Build hooks env.
11594

11595
    This will run on the master, primary node and target node.
11596

11597
    """
11598
    env = {
11599
      "EXPORT_MODE": self.op.mode,
11600
      "EXPORT_NODE": self.op.target_node,
11601
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
11602
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
11603
      # TODO: Generic function for boolean env variables
11604
      "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
11605
      }
11606

    
11607
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
11608

    
11609
    return env
11610

    
11611
  def BuildHooksNodes(self):
11612
    """Build hooks nodes.
11613

11614
    """
11615
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
11616

    
11617
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
11618
      nl.append(self.op.target_node)
11619

    
11620
    return (nl, nl)
11621

    
11622
  def CheckPrereq(self):
11623
    """Check prerequisites.
11624

11625
    This checks that the instance and node names are valid.
11626

11627
    """
11628
    instance_name = self.op.instance_name
11629

    
11630
    self.instance = self.cfg.GetInstanceInfo(instance_name)
11631
    assert self.instance is not None, \
11632
          "Cannot retrieve locked instance %s" % self.op.instance_name
11633
    _CheckNodeOnline(self, self.instance.primary_node)
11634

    
11635
    if (self.op.remove_instance and self.instance.admin_up and
11636
        not self.op.shutdown):
11637
      raise errors.OpPrereqError("Can not remove instance without shutting it"
11638
                                 " down before")
11639

    
11640
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
11641
      self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
11642
      self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
11643
      assert self.dst_node is not None
11644

    
11645
      _CheckNodeOnline(self, self.dst_node.name)
11646
      _CheckNodeNotDrained(self, self.dst_node.name)
11647

    
11648
      self._cds = None
11649
      self.dest_disk_info = None
11650
      self.dest_x509_ca = None
11651

    
11652
    elif self.op.mode == constants.EXPORT_MODE_REMOTE:
11653
      self.dst_node = None
11654

    
11655
      if len(self.op.target_node) != len(self.instance.disks):
11656
        raise errors.OpPrereqError(("Received destination information for %s"
11657
                                    " disks, but instance %s has %s disks") %
11658
                                   (len(self.op.target_node), instance_name,
11659
                                    len(self.instance.disks)),
11660
                                   errors.ECODE_INVAL)
11661

    
11662
      cds = _GetClusterDomainSecret()
11663

    
11664
      # Check X509 key name
11665
      try:
11666
        (key_name, hmac_digest, hmac_salt) = self.x509_key_name
11667
      except (TypeError, ValueError), err:
11668
        raise errors.OpPrereqError("Invalid data for X509 key name: %s" % err)
11669

    
11670
      if not utils.VerifySha1Hmac(cds, key_name, hmac_digest, salt=hmac_salt):
11671
        raise errors.OpPrereqError("HMAC for X509 key name is wrong",
11672
                                   errors.ECODE_INVAL)
11673

    
11674
      # Load and verify CA
11675
      try:
11676
        (cert, _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem, cds)
11677
      except OpenSSL.crypto.Error, err:
11678
        raise errors.OpPrereqError("Unable to load destination X509 CA (%s)" %
11679
                                   (err, ), errors.ECODE_INVAL)
11680

    
11681
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
11682
      if errcode is not None:
11683
        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
11684
                                   (msg, ), errors.ECODE_INVAL)
11685

    
11686
      self.dest_x509_ca = cert
11687

    
11688
      # Verify target information
11689
      disk_info = []
11690
      for idx, disk_data in enumerate(self.op.target_node):
11691
        try:
11692
          (host, port, magic) = \
11693
            masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
11694
        except errors.GenericError, err:
11695
          raise errors.OpPrereqError("Target info for disk %s: %s" %
11696
                                     (idx, err), errors.ECODE_INVAL)
11697

    
11698
        disk_info.append((host, port, magic))
11699

    
11700
      assert len(disk_info) == len(self.op.target_node)
11701
      self.dest_disk_info = disk_info
11702

    
11703
    else:
11704
      raise errors.ProgrammerError("Unhandled export mode %r" %
11705
                                   self.op.mode)
11706

    
11707
    # instance disk type verification
11708
    # TODO: Implement export support for file-based disks
11709
    for disk in self.instance.disks:
11710
      if disk.dev_type == constants.LD_FILE:
11711
        raise errors.OpPrereqError("Export not supported for instances with"
11712
                                   " file-based disks", errors.ECODE_INVAL)
11713

    
11714
  def _CleanupExports(self, feedback_fn):
11715
    """Removes exports of current instance from all other nodes.
11716

11717
    If an instance in a cluster with nodes A..D was exported to node C, its
11718
    exports will be removed from the nodes A, B and D.
11719

11720
    """
11721
    assert self.op.mode != constants.EXPORT_MODE_REMOTE
11722

    
11723
    nodelist = self.cfg.GetNodeList()
11724
    nodelist.remove(self.dst_node.name)
11725

    
11726
    # on one-node clusters nodelist will be empty after the removal
11727
    # if we proceeded, the backup would be removed because OpBackupQuery
11728
    # substitutes an empty list with the full cluster node list.
11729
    iname = self.instance.name
11730
    if nodelist:
11731
      feedback_fn("Removing old exports for instance %s" % iname)
11732
      exportlist = self.rpc.call_export_list(nodelist)
11733
      for node in exportlist:
11734
        if exportlist[node].fail_msg:
11735
          continue
11736
        if iname in exportlist[node].payload:
11737
          msg = self.rpc.call_export_remove(node, iname).fail_msg
11738
          if msg:
11739
            self.LogWarning("Could not remove older export for instance %s"
11740
                            " on node %s: %s", iname, node, msg)
11741

    
11742
  def Exec(self, feedback_fn):
11743
    """Export an instance to an image in the cluster.
11744

11745
    """
11746
    assert self.op.mode in constants.EXPORT_MODES
11747

    
11748
    instance = self.instance
11749
    src_node = instance.primary_node
11750

    
11751
    if self.op.shutdown:
11752
      # shutdown the instance, but not the disks
11753
      feedback_fn("Shutting down instance %s" % instance.name)
11754
      result = self.rpc.call_instance_shutdown(src_node, instance,
11755
                                               self.op.shutdown_timeout)
11756
      # TODO: Maybe ignore failures if ignore_remove_failures is set
11757
      result.Raise("Could not shutdown instance %s on"
11758
                   " node %s" % (instance.name, src_node))
11759

    
11760
    # set the disks ID correctly since call_instance_start needs the
11761
    # correct drbd minor to create the symlinks
11762
    for disk in instance.disks:
11763
      self.cfg.SetDiskID(disk, src_node)
11764

    
11765
    activate_disks = (not instance.admin_up)
11766

    
11767
    if activate_disks:
11768
      # Activate the instance disks if we're exporting a stopped instance
11769
      feedback_fn("Activating disks for %s" % instance.name)
11770
      _StartInstanceDisks(self, instance, None)
11771

    
11772
    try:
11773
      helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
11774
                                                     instance)
11775

    
11776
      helper.CreateSnapshots()
11777
      try:
11778
        if (self.op.shutdown and instance.admin_up and
11779
            not self.op.remove_instance):
11780
          assert not activate_disks
11781
          feedback_fn("Starting instance %s" % instance.name)
11782
          result = self.rpc.call_instance_start(src_node, instance,
11783
                                                None, None, False)
11784
          msg = result.fail_msg
11785
          if msg:
11786
            feedback_fn("Failed to start instance: %s" % msg)
11787
            _ShutdownInstanceDisks(self, instance)
11788
            raise errors.OpExecError("Could not start instance: %s" % msg)
11789

    
11790
        if self.op.mode == constants.EXPORT_MODE_LOCAL:
11791
          (fin_resu, dresults) = helper.LocalExport(self.dst_node)
11792
        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
11793
          connect_timeout = constants.RIE_CONNECT_TIMEOUT
11794
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
11795

    
11796
          (key_name, _, _) = self.x509_key_name
11797

    
11798
          dest_ca_pem = \
11799
            OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
11800
                                            self.dest_x509_ca)
11801

    
11802
          (fin_resu, dresults) = helper.RemoteExport(self.dest_disk_info,
11803
                                                     key_name, dest_ca_pem,
11804
                                                     timeouts)
11805
      finally:
11806
        helper.Cleanup()
11807

    
11808
      # Check for backwards compatibility
11809
      assert len(dresults) == len(instance.disks)
11810
      assert compat.all(isinstance(i, bool) for i in dresults), \
11811
             "Not all results are boolean: %r" % dresults
11812

    
11813
    finally:
11814
      if activate_disks:
11815
        feedback_fn("Deactivating disks for %s" % instance.name)
11816
        _ShutdownInstanceDisks(self, instance)
11817

    
11818
    if not (compat.all(dresults) and fin_resu):
11819
      failures = []
11820
      if not fin_resu:
11821
        failures.append("export finalization")
11822
      if not compat.all(dresults):
11823
        fdsk = utils.CommaJoin(idx for (idx, dsk) in enumerate(dresults)
11824
                               if not dsk)
11825
        failures.append("disk export: disk(s) %s" % fdsk)
11826

    
11827
      raise errors.OpExecError("Export failed, errors in %s" %
11828
                               utils.CommaJoin(failures))
11829

    
11830
    # At this point, the export was successful, we can cleanup/finish
11831

    
11832
    # Remove instance if requested
11833
    if self.op.remove_instance:
11834
      feedback_fn("Removing instance %s" % instance.name)
11835
      _RemoveInstance(self, feedback_fn, instance,
11836
                      self.op.ignore_remove_failures)
11837

    
11838
    if self.op.mode == constants.EXPORT_MODE_LOCAL:
11839
      self._CleanupExports(feedback_fn)
11840

    
11841
    return fin_resu, dresults
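    # fin_resu reports whether export finalization succeeded and dresults
    # holds one boolean per instance disk; any failure was already turned into
    # an OpExecError above, so reaching this return means every disk was
    # exported and the export was finalized.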
11842

    
11843

    
11844
class LUBackupRemove(NoHooksLU):
11845
  """Remove exports related to the named instance.
11846

11847
  """
11848
  REQ_BGL = False
11849

    
11850
  def ExpandNames(self):
11851
    self.needed_locks = {}
11852
    # We need all nodes to be locked in order for RemoveExport to work, but we
11853
    # don't need to lock the instance itself, as nothing will happen to it (and
11854
    # we can remove exports also for a removed instance)
11855
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
11856

    
11857
  def Exec(self, feedback_fn):
11858
    """Remove any export.
11859

11860
    """
11861
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
11862
    # If the instance was not found we'll try with the name that was passed in.
11863
    # This will only work if it was an FQDN, though.
11864
    fqdn_warn = False
11865
    if not instance_name:
11866
      fqdn_warn = True
11867
      instance_name = self.op.instance_name
11868

    
11869
    locked_nodes = self.owned_locks(locking.LEVEL_NODE)
11870
    exportlist = self.rpc.call_export_list(locked_nodes)
11871
    found = False
11872
    for node in exportlist:
11873
      msg = exportlist[node].fail_msg
11874
      if msg:
11875
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
11876
        continue
11877
      if instance_name in exportlist[node].payload:
11878
        found = True
11879
        result = self.rpc.call_export_remove(node, instance_name)
11880
        msg = result.fail_msg
11881
        if msg:
11882
          logging.error("Could not remove export for instance %s"
11883
                        " on node %s: %s", instance_name, node, msg)
11884

    
11885
    if fqdn_warn and not found:
11886
      feedback_fn("Export not found. If trying to remove an export belonging"
11887
                  " to a deleted instance please use its Fully Qualified"
11888
                  " Domain Name.")
11889

    
11890

    
11891
class LUGroupAdd(LogicalUnit):
11892
  """Logical unit for creating node groups.
11893

11894
  """
11895
  HPATH = "group-add"
11896
  HTYPE = constants.HTYPE_GROUP
11897
  REQ_BGL = False
11898

    
11899
  def ExpandNames(self):
11900
    # We need the new group's UUID here so that we can create and acquire the
11901
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
11902
    # that it should not check whether the UUID exists in the configuration.
11903
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
11904
    self.needed_locks = {}
11905
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
11906

    
11907
  def CheckPrereq(self):
11908
    """Check prerequisites.
11909

11910
    This checks that the given group name is not an existing node group
11911
    already.
11912

11913
    """
11914
    try:
11915
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
11916
    except errors.OpPrereqError:
11917
      pass
11918
    else:
11919
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
11920
                                 " node group (UUID: %s)" %
11921
                                 (self.op.group_name, existing_uuid),
11922
                                 errors.ECODE_EXISTS)
11923

    
11924
    if self.op.ndparams:
11925
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
11926

    
11927
  def BuildHooksEnv(self):
11928
    """Build hooks env.
11929

11930
    """
11931
    return {
11932
      "GROUP_NAME": self.op.group_name,
11933
      }
11934

    
11935
  def BuildHooksNodes(self):
11936
    """Build hooks nodes.
11937

11938
    """
11939
    mn = self.cfg.GetMasterNode()
11940
    return ([mn], [mn])
11941

    
11942
  def Exec(self, feedback_fn):
11943
    """Add the node group to the cluster.
11944

11945
    """
11946
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
11947
                                  uuid=self.group_uuid,
11948
                                  alloc_policy=self.op.alloc_policy,
11949
                                  ndparams=self.op.ndparams)
11950

    
11951
    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
11952
    del self.remove_locks[locking.LEVEL_NODEGROUP]
11953

    
11954

    
11955
class LUGroupAssignNodes(NoHooksLU):
11956
  """Logical unit for assigning nodes to groups.
11957

11958
  """
11959
  REQ_BGL = False
11960

    
11961
  def ExpandNames(self):
11962
    # These raise errors.OpPrereqError on their own:
11963
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
11964
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
11965

    
11966
    # We want to lock all the affected nodes and groups. We have readily
11967
    # available the list of nodes, and the *destination* group. To gather the
11968
    # list of "source" groups, we need to fetch node information later on.
11969
    self.needed_locks = {
11970
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
11971
      locking.LEVEL_NODE: self.op.nodes,
11972
      }
11973

    
11974
  def DeclareLocks(self, level):
11975
    if level == locking.LEVEL_NODEGROUP:
11976
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
11977

    
11978
      # Try to get all affected nodes' groups without having the group or node
11979
      # lock yet. Needs verification later in the code flow.
11980
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
11981

    
11982
      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
11983

    
11984
  def CheckPrereq(self):
11985
    """Check prerequisites.
11986

11987
    """
11988
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
11989
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
11990
            frozenset(self.op.nodes))
11991

    
11992
    expected_locks = (set([self.group_uuid]) |
11993
                      self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
11994
    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
11995
    if actual_locks != expected_locks:
11996
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
11997
                               " current groups are '%s', used to be '%s'" %
11998
                               (utils.CommaJoin(expected_locks),
11999
                                utils.CommaJoin(actual_locks)))
12000

    
12001
    self.node_data = self.cfg.GetAllNodesInfo()
12002
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
12003
    instance_data = self.cfg.GetAllInstancesInfo()
12004

    
12005
    if self.group is None:
12006
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
12007
                               (self.op.group_name, self.group_uuid))
12008

    
12009
    (new_splits, previous_splits) = \
12010
      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
12011
                                             for node in self.op.nodes],
12012
                                            self.node_data, instance_data)
12013

    
12014
    if new_splits:
12015
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
12016

    
12017
      if not self.op.force:
12018
        raise errors.OpExecError("The following instances get split by this"
12019
                                 " change and --force was not given: %s" %
12020
                                 fmt_new_splits)
12021
      else:
12022
        self.LogWarning("This operation will split the following instances: %s",
12023
                        fmt_new_splits)
12024

    
12025
        if previous_splits:
12026
          self.LogWarning("In addition, these already-split instances continue"
12027
                          " to be split across groups: %s",
12028
                          utils.CommaJoin(utils.NiceSort(previous_splits)))
12029

    
12030
  def Exec(self, feedback_fn):
12031
    """Assign nodes to a new group.
12032

12033
    """
12034
    for node in self.op.nodes:
12035
      self.node_data[node].group = self.group_uuid
12036

    
12037
    # FIXME: Depends on side-effects of modifying the result of
12038
    # C{cfg.GetAllNodesInfo}
12039

    
12040
    self.cfg.Update(self.group, feedback_fn) # Saves all modified nodes.
12041

    
12042
  @staticmethod
12043
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
12044
    """Check for split instances after a node assignment.
12045

12046
    This method considers a series of node assignments as an atomic operation,
12047
    and returns information about split instances after applying the set of
12048
    changes.
12049

12050
    In particular, it returns information about newly split instances, and
12051
    instances that were already split, and remain so after the change.
12052

12053
    Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
12054
    considered.
12055

12056
    @type changes: list of (node_name, new_group_uuid) pairs.
12057
    @param changes: list of node assignments to consider.
12058
    @param node_data: a dict with data for all nodes
12059
    @param instance_data: a dict with all instances to consider
12060
    @rtype: a two-tuple
12061
    @return: a list of instances that were previously okay and become split as a
12062
      consequence of this change, and a list of instances that were previously
12063
      split and this change does not fix.
12064

12065
    """
12066
    changed_nodes = dict((node, group) for node, group in changes
12067
                         if node_data[node].group != group)
12068

    
12069
    all_split_instances = set()
12070
    previously_split_instances = set()
12071

    
12072
    def InstanceNodes(instance):
12073
      return [instance.primary_node] + list(instance.secondary_nodes)
12074

    
12075
    for inst in instance_data.values():
12076
      if inst.disk_template not in constants.DTS_INT_MIRROR:
12077
        continue
12078

    
12079
      instance_nodes = InstanceNodes(inst)
12080

    
12081
      if len(set(node_data[node].group for node in instance_nodes)) > 1:
12082
        previously_split_instances.add(inst.name)
12083

    
12084
      if len(set(changed_nodes.get(node, node_data[node].group)
12085
                 for node in instance_nodes)) > 1:
12086
        all_split_instances.add(inst.name)
12087

    
12088
    return (list(all_split_instances - previously_split_instances),
12089
            list(previously_split_instances & all_split_instances))
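    # Worked example (made-up names): a DRBD instance with its primary on
    # nodeA and its secondary on nodeB, both currently in group G1, ends up in
    # the first returned list if only nodeB is moved to G2 (newly split); an
    # instance that already spanned two groups before the change and still
    # does afterwards ends up in the second list instead.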
12090

    
12091

    
12092
class _GroupQuery(_QueryBase):
12093
  FIELDS = query.GROUP_FIELDS
12094

    
12095
  def ExpandNames(self, lu):
12096
    lu.needed_locks = {}
12097

    
12098
    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
12099
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())
12100

    
12101
    if not self.names:
12102
      self.wanted = [name_to_uuid[name]
12103
                     for name in utils.NiceSort(name_to_uuid.keys())]
12104
    else:
12105
      # Accept names to be either names or UUIDs.
12106
      missing = []
12107
      self.wanted = []
12108
      all_uuid = frozenset(self._all_groups.keys())
12109

    
12110
      for name in self.names:
12111
        if name in all_uuid:
12112
          self.wanted.append(name)
12113
        elif name in name_to_uuid:
12114
          self.wanted.append(name_to_uuid[name])
12115
        else:
12116
          missing.append(name)
12117

    
12118
      if missing:
12119
        raise errors.OpPrereqError("Some groups do not exist: %s" %
12120
                                   utils.CommaJoin(missing),
12121
                                   errors.ECODE_NOENT)
12122

    
12123
  def DeclareLocks(self, lu, level):
12124
    pass
12125

    
12126
  def _GetQueryData(self, lu):
12127
    """Computes the list of node groups and their attributes.
12128

12129
    """
12130
    do_nodes = query.GQ_NODE in self.requested_data
12131
    do_instances = query.GQ_INST in self.requested_data
12132

    
12133
    group_to_nodes = None
12134
    group_to_instances = None
12135

    
12136
    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
12137
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for the
12138
    # latter GetAllInstancesInfo() is not enough, for we have to go through
12139
    # instance->node. Hence, we will need to process nodes even if we only need
12140
    # instance information.
12141
    if do_nodes or do_instances:
12142
      all_nodes = lu.cfg.GetAllNodesInfo()
12143
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
12144
      node_to_group = {}
12145

    
12146
      for node in all_nodes.values():
12147
        if node.group in group_to_nodes:
12148
          group_to_nodes[node.group].append(node.name)
12149
          node_to_group[node.name] = node.group
12150

    
12151
      if do_instances:
12152
        all_instances = lu.cfg.GetAllInstancesInfo()
12153
        group_to_instances = dict((uuid, []) for uuid in self.wanted)
12154

    
12155
        for instance in all_instances.values():
12156
          node = instance.primary_node
12157
          if node in node_to_group:
12158
            group_to_instances[node_to_group[node]].append(instance.name)
12159

    
12160
        if not do_nodes:
12161
          # Do not pass on node information if it was not requested.
12162
          group_to_nodes = None
12163

    
12164
    return query.GroupQueryData([self._all_groups[uuid]
12165
                                 for uuid in self.wanted],
12166
                                group_to_nodes, group_to_instances)
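  # Sketch of the intermediate mappings built above (hypothetical data): with
  # nodes "node1" and "node2" in group "g1-uuid", an instance "inst1" whose
  # primary node is "node1", and both GQ_NODE and GQ_INST requested,
  # query.GroupQueryData receives
  #
  #   group_to_nodes     == {"g1-uuid": ["node1", "node2"]}
  #   group_to_instances == {"g1-uuid": ["inst1"]}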
12167

    
12168

    
12169
class LUGroupQuery(NoHooksLU):
12170
  """Logical unit for querying node groups.
12171

12172
  """
12173
  REQ_BGL = False
12174

    
12175
  def CheckArguments(self):
12176
    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
12177
                          self.op.output_fields, False)
12178

    
12179
  def ExpandNames(self):
12180
    self.gq.ExpandNames(self)
12181

    
12182
  def DeclareLocks(self, level):
12183
    self.gq.DeclareLocks(self, level)
12184

    
12185
  def Exec(self, feedback_fn):
12186
    return self.gq.OldStyleQuery(self)
12187

    
12188

    
12189
class LUGroupSetParams(LogicalUnit):
12190
  """Modifies the parameters of a node group.
12191

12192
  """
12193
  HPATH = "group-modify"
12194
  HTYPE = constants.HTYPE_GROUP
12195
  REQ_BGL = False
12196

    
12197
  def CheckArguments(self):
12198
    all_changes = [
12199
      self.op.ndparams,
12200
      self.op.alloc_policy,
12201
      ]
12202

    
12203
    if all_changes.count(None) == len(all_changes):
12204
      raise errors.OpPrereqError("Please pass at least one modification",
12205
                                 errors.ECODE_INVAL)
12206

    
12207
  def ExpandNames(self):
12208
    # This raises errors.OpPrereqError on its own:
12209
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
12210

    
12211
    self.needed_locks = {
12212
      locking.LEVEL_NODEGROUP: [self.group_uuid],
12213
      }
12214

    
12215
  def CheckPrereq(self):
12216
    """Check prerequisites.
12217

12218
    """
12219
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
12220

    
12221
    if self.group is None:
12222
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
12223
                               (self.op.group_name, self.group_uuid))
12224

    
12225
    if self.op.ndparams:
12226
      new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
12227
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
12228
      self.new_ndparams = new_ndparams
12229

    
12230
  def BuildHooksEnv(self):
12231
    """Build hooks env.
12232

12233
    """
12234
    return {
12235
      "GROUP_NAME": self.op.group_name,
12236
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
12237
      }
12238

    
12239
  def BuildHooksNodes(self):
12240
    """Build hooks nodes.
12241

12242
    """
12243
    mn = self.cfg.GetMasterNode()
12244
    return ([mn], [mn])
12245

    
12246
  def Exec(self, feedback_fn):
12247
    """Modifies the node group.
12248

12249
    """
12250
    result = []
12251

    
12252
    if self.op.ndparams:
12253
      self.group.ndparams = self.new_ndparams
12254
      result.append(("ndparams", str(self.group.ndparams)))
12255

    
12256
    if self.op.alloc_policy:
12257
      self.group.alloc_policy = self.op.alloc_policy
12258

    
12259
    self.cfg.Update(self.group, feedback_fn)
12260
    return result
12261

    
12262

    
12263
class LUGroupRemove(LogicalUnit):
12264
  HPATH = "group-remove"
12265
  HTYPE = constants.HTYPE_GROUP
12266
  REQ_BGL = False
12267

    
12268
  def ExpandNames(self):
12269
    # This raises errors.OpPrereqError on its own:
12270
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
12271
    self.needed_locks = {
12272
      locking.LEVEL_NODEGROUP: [self.group_uuid],
12273
      }
12274

    
12275
  def CheckPrereq(self):
12276
    """Check prerequisites.
12277

12278
    This checks that the given group name exists as a node group, that it is
12279
    empty (i.e., contains no nodes), and that it is not the last group of the
12280
    cluster.
12281

12282
    """
12283
    # Verify that the group is empty.
12284
    group_nodes = [node.name
12285
                   for node in self.cfg.GetAllNodesInfo().values()
12286
                   if node.group == self.group_uuid]
12287

    
12288
    if group_nodes:
12289
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
12290
                                 " nodes: %s" %
12291
                                 (self.op.group_name,
12292
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
12293
                                 errors.ECODE_STATE)
12294

    
12295
    # Verify the cluster would not be left group-less.
12296
    if len(self.cfg.GetNodeGroupList()) == 1:
12297
      raise errors.OpPrereqError("Group '%s' is the only group,"
12298
                                 " cannot be removed" %
12299
                                 self.op.group_name,
12300
                                 errors.ECODE_STATE)
12301

    
12302
  def BuildHooksEnv(self):
12303
    """Build hooks env.
12304

12305
    """
12306
    return {
12307
      "GROUP_NAME": self.op.group_name,
12308
      }
12309

    
12310
  def BuildHooksNodes(self):
12311
    """Build hooks nodes.
12312

12313
    """
12314
    mn = self.cfg.GetMasterNode()
12315
    return ([mn], [mn])
12316

    
12317
  def Exec(self, feedback_fn):
12318
    """Remove the node group.
12319

12320
    """
12321
    try:
12322
      self.cfg.RemoveNodeGroup(self.group_uuid)
12323
    except errors.ConfigurationError:
12324
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
12325
                               (self.op.group_name, self.group_uuid))
12326

    
12327
    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
12328

    
12329

    
12330
class LUGroupRename(LogicalUnit):
12331
  HPATH = "group-rename"
12332
  HTYPE = constants.HTYPE_GROUP
12333
  REQ_BGL = False
12334

    
12335
  def ExpandNames(self):
12336
    # This raises errors.OpPrereqError on its own:
12337
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
12338

    
12339
    self.needed_locks = {
12340
      locking.LEVEL_NODEGROUP: [self.group_uuid],
12341
      }
12342

    
12343
  def CheckPrereq(self):
12344
    """Check prerequisites.
12345

12346
    Ensures requested new name is not yet used.
12347

12348
    """
12349
    try:
12350
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
12351
    except errors.OpPrereqError:
12352
      pass
12353
    else:
12354
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
12355
                                 " node group (UUID: %s)" %
12356
                                 (self.op.new_name, new_name_uuid),
12357
                                 errors.ECODE_EXISTS)
12358

    
12359
  def BuildHooksEnv(self):
12360
    """Build hooks env.
12361

12362
    """
12363
    return {
12364
      "OLD_NAME": self.op.group_name,
12365
      "NEW_NAME": self.op.new_name,
12366
      }
12367

    
12368
  def BuildHooksNodes(self):
12369
    """Build hooks nodes.
12370

12371
    """
12372
    mn = self.cfg.GetMasterNode()
12373

    
12374
    all_nodes = self.cfg.GetAllNodesInfo()
12375
    all_nodes.pop(mn, None)
12376

    
12377
    run_nodes = [mn]
12378
    run_nodes.extend(node.name for node in all_nodes.values()
12379
                     if node.group == self.group_uuid)
12380

    
12381
    return (run_nodes, run_nodes)
12382

    
12383
  def Exec(self, feedback_fn):
12384
    """Rename the node group.
12385

12386
    """
12387
    group = self.cfg.GetNodeGroup(self.group_uuid)
12388

    
12389
    if group is None:
12390
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
12391
                               (self.op.group_name, self.group_uuid))
12392

    
12393
    group.name = self.op.new_name
12394
    self.cfg.Update(group, feedback_fn)
12395

    
12396
    return self.op.new_name
12397

    
12398

    
12399
class LUGroupEvacuate(LogicalUnit):
12400
  HPATH = "group-evacuate"
12401
  HTYPE = constants.HTYPE_GROUP
12402
  REQ_BGL = False
12403

    
12404
  def ExpandNames(self):
12405
    # This raises errors.OpPrereqError on its own:
12406
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
12407

    
12408
    if self.op.target_groups:
12409
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
12410
                                  self.op.target_groups)
12411
    else:
12412
      self.req_target_uuids = []
12413

    
12414
    if self.group_uuid in self.req_target_uuids:
12415
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
12416
                                 " as a target group (targets are %s)" %
12417
                                 (self.group_uuid,
12418
                                  utils.CommaJoin(self.req_target_uuids)),
12419
                                 errors.ECODE_INVAL)
12420

    
12421
    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
12422

    
12423
    self.share_locks = _ShareAll()
12424
    self.needed_locks = {
12425
      locking.LEVEL_INSTANCE: [],
12426
      locking.LEVEL_NODEGROUP: [],
12427
      locking.LEVEL_NODE: [],
12428
      }
12429

    
12430
  def DeclareLocks(self, level):
12431
    if level == locking.LEVEL_INSTANCE:
12432
      assert not self.needed_locks[locking.LEVEL_INSTANCE]
12433

    
12434
      # Lock instances optimistically, needs verification once node and group
12435
      # locks have been acquired
12436
      self.needed_locks[locking.LEVEL_INSTANCE] = \
12437
        self.cfg.GetNodeGroupInstances(self.group_uuid)
12438

    
12439
    elif level == locking.LEVEL_NODEGROUP:
12440
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
12441

    
12442
      if self.req_target_uuids:
12443
        lock_groups = set([self.group_uuid] + self.req_target_uuids)
12444

    
12445
        # Lock all groups used by instances optimistically; this requires going
12446
        # via the node before it's locked, requiring verification later on
12447
        lock_groups.update(group_uuid
12448
                           for instance_name in
12449
                             self.owned_locks(locking.LEVEL_INSTANCE)
12450
                           for group_uuid in
12451
                             self.cfg.GetInstanceNodeGroups(instance_name))
12452
      else:
12453
        # No target groups, need to lock all of them
12454
        lock_groups = locking.ALL_SET
12455

    
12456
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
12457

    
12458
    elif level == locking.LEVEL_NODE:
12459
      # This will only lock the nodes in the group to be evacuated which
12460
      # contain actual instances
12461
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
12462
      self._LockInstancesNodes()
12463

    
12464
      # Lock all nodes in group to be evacuated and target groups
12465
      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
12466
      assert self.group_uuid in owned_groups
12467
      member_nodes = [node_name
12468
                      for group in owned_groups
12469
                      for node_name in self.cfg.GetNodeGroup(group).members]
12470
      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
12471

    
12472
  def CheckPrereq(self):
12473
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
12474
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
12475
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
12476

    
12477
    assert owned_groups.issuperset(self.req_target_uuids)
12478
    assert self.group_uuid in owned_groups
12479

    
12480
    # Check if locked instances are still correct
12481
    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
12482

    
12483
    # Get instance information
12484
    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
12485

    
12486
    # Check if node groups for locked instances are still correct
12487
    for instance_name in owned_instances:
12488
      inst = self.instances[instance_name]
12489
      assert owned_nodes.issuperset(inst.all_nodes), \
12490
        "Instance %s's nodes changed while we kept the lock" % instance_name
12491

    
12492
      inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
12493
                                             owned_groups)
12494

    
12495
      assert self.group_uuid in inst_groups, \
12496
        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
12497

    
12498
    if self.req_target_uuids:
12499
      # User requested specific target groups
12500
      self.target_uuids = self.req_target_uuids
12501
    else:
12502
      # All groups except the one to be evacuated are potential targets
12503
      self.target_uuids = [group_uuid for group_uuid in owned_groups
12504
                           if group_uuid != self.group_uuid]
12505

    
12506
      if not self.target_uuids:
12507
        raise errors.OpPrereqError("There are no possible target groups",
12508
                                   errors.ECODE_INVAL)
12509

    
12510
  def BuildHooksEnv(self):
12511
    """Build hooks env.
12512

12513
    """
12514
    return {
12515
      "GROUP_NAME": self.op.group_name,
12516
      "TARGET_GROUPS": " ".join(self.target_uuids),
12517
      }
12518

    
12519
  def BuildHooksNodes(self):
12520
    """Build hooks nodes.
12521

12522
    """
12523
    mn = self.cfg.GetMasterNode()
12524

    
12525
    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
12526

    
12527
    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
12528

    
12529
    return (run_nodes, run_nodes)
12530

    
12531
  def Exec(self, feedback_fn):
12532
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
12533

    
12534
    assert self.group_uuid not in self.target_uuids
12535

    
12536
    ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
12537
                     instances=instances, target_groups=self.target_uuids)
12538

    
12539
    ial.Run(self.op.iallocator)
12540

    
12541
    if not ial.success:
12542
      raise errors.OpPrereqError("Can't compute group evacuation using"
12543
                                 " iallocator '%s': %s" %
12544
                                 (self.op.iallocator, ial.info),
12545
                                 errors.ECODE_NORES)
12546

    
12547
    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
12548

    
12549
    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
12550
                 len(jobs), self.op.group_name)
12551

    
12552
    return ResultWithJobs(jobs)
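  # The jobs returned here are built by _LoadNodeEvacResult from the
  # iallocator's reply; each job is a list of opcode definitions limited to
  # instance failover, migration and disk replacement (see the _JOB_LIST
  # check in the IAllocator class below).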
12553

    
12554

    
12555
class TagsLU(NoHooksLU): # pylint: disable=W0223
12556
  """Generic tags LU.
12557

12558
  This is an abstract class which is the parent of all the other tags LUs.
12559

12560
  """
12561
  def ExpandNames(self):
12562
    self.group_uuid = None
12563
    self.needed_locks = {}
12564
    if self.op.kind == constants.TAG_NODE:
12565
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
12566
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
12567
    elif self.op.kind == constants.TAG_INSTANCE:
12568
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
12569
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
12570
    elif self.op.kind == constants.TAG_NODEGROUP:
12571
      self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
12572

    
12573
    # FIXME: Acquire BGL for cluster tag operations (as of this writing it's
12574
    # not possible to acquire the BGL based on opcode parameters)
12575

    
12576
  def CheckPrereq(self):
12577
    """Check prerequisites.
12578

12579
    """
12580
    if self.op.kind == constants.TAG_CLUSTER:
12581
      self.target = self.cfg.GetClusterInfo()
12582
    elif self.op.kind == constants.TAG_NODE:
12583
      self.target = self.cfg.GetNodeInfo(self.op.name)
12584
    elif self.op.kind == constants.TAG_INSTANCE:
12585
      self.target = self.cfg.GetInstanceInfo(self.op.name)
12586
    elif self.op.kind == constants.TAG_NODEGROUP:
12587
      self.target = self.cfg.GetNodeGroup(self.group_uuid)
12588
    else:
12589
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
12590
                                 str(self.op.kind), errors.ECODE_INVAL)
12591

    
12592

    
12593
class LUTagsGet(TagsLU):
12594
  """Returns the tags of a given object.
12595

12596
  """
12597
  REQ_BGL = False
12598

    
12599
  def ExpandNames(self):
12600
    TagsLU.ExpandNames(self)
12601

    
12602
    # Share locks as this is only a read operation
12603
    self.share_locks = _ShareAll()
12604

    
12605
  def Exec(self, feedback_fn):
12606
    """Returns the tag list.
12607

12608
    """
12609
    return list(self.target.GetTags())
12610

    
12611

    
12612
class LUTagsSearch(NoHooksLU):
12613
  """Searches the tags for a given pattern.
12614

12615
  """
12616
  REQ_BGL = False
12617

    
12618
  def ExpandNames(self):
12619
    self.needed_locks = {}
12620

    
12621
  def CheckPrereq(self):
12622
    """Check prerequisites.
12623

12624
    This checks the pattern passed for validity by compiling it.
12625

12626
    """
12627
    try:
12628
      self.re = re.compile(self.op.pattern)
12629
    except re.error, err:
12630
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
12631
                                 (self.op.pattern, err), errors.ECODE_INVAL)
12632

    
12633
  def Exec(self, feedback_fn):
12634
    """Returns the tag list.
12635

12636
    """
12637
    cfg = self.cfg
12638
    tgts = [("/cluster", cfg.GetClusterInfo())]
12639
    ilist = cfg.GetAllInstancesInfo().values()
12640
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
12641
    nlist = cfg.GetAllNodesInfo().values()
12642
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
12643
    tgts.extend(("/nodegroup/%s" % n.name, n)
12644
                for n in cfg.GetAllNodeGroupsInfo().values())
12645
    results = []
12646
    for path, target in tgts:
12647
      for tag in target.GetTags():
12648
        if self.re.search(tag):
12649
          results.append((path, tag))
12650
    return results
12651

    
12652

    
12653
class LUTagsSet(TagsLU):
12654
  """Sets a tag on a given object.
12655

12656
  """
12657
  REQ_BGL = False
12658

    
12659
  def CheckPrereq(self):
12660
    """Check prerequisites.
12661

12662
    This checks the type and length of the tag name and value.
12663

12664
    """
12665
    TagsLU.CheckPrereq(self)
12666
    for tag in self.op.tags:
12667
      objects.TaggableObject.ValidateTag(tag)
12668

    
12669
  def Exec(self, feedback_fn):
12670
    """Sets the tag.
12671

12672
    """
12673
    try:
12674
      for tag in self.op.tags:
12675
        self.target.AddTag(tag)
12676
    except errors.TagError, err:
12677
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
12678
    self.cfg.Update(self.target, feedback_fn)
12679

    
12680

    
12681
class LUTagsDel(TagsLU):
12682
  """Delete a list of tags from a given object.
12683

12684
  """
12685
  REQ_BGL = False
12686

    
12687
  def CheckPrereq(self):
12688
    """Check prerequisites.
12689

12690
    This checks that we have the given tag.
12691

12692
    """
12693
    TagsLU.CheckPrereq(self)
12694
    for tag in self.op.tags:
12695
      objects.TaggableObject.ValidateTag(tag)
12696
    del_tags = frozenset(self.op.tags)
12697
    cur_tags = self.target.GetTags()
12698

    
12699
    diff_tags = del_tags - cur_tags
12700
    if diff_tags:
12701
      diff_names = ("'%s'" % i for i in sorted(diff_tags))
12702
      raise errors.OpPrereqError("Tag(s) %s not found" %
12703
                                 (utils.CommaJoin(diff_names), ),
12704
                                 errors.ECODE_NOENT)
12705

    
12706
  def Exec(self, feedback_fn):
12707
    """Remove the tag from the object.
12708

12709
    """
12710
    for tag in self.op.tags:
12711
      self.target.RemoveTag(tag)
12712
    self.cfg.Update(self.target, feedback_fn)
12713

    
12714

    
12715
class LUTestDelay(NoHooksLU):
12716
  """Sleep for a specified amount of time.
12717

12718
  This LU sleeps on the master and/or nodes for a specified amount of
12719
  time.
12720

12721
  """
12722
  REQ_BGL = False
12723

    
12724
  def ExpandNames(self):
12725
    """Expand names and set required locks.
12726

12727
    This expands the node list, if any.
12728

12729
    """
12730
    self.needed_locks = {}
12731
    if self.op.on_nodes:
12732
      # _GetWantedNodes can be used here, but is not always appropriate to use
12733
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
12734
      # more information.
12735
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
12736
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
12737

    
12738
  def _TestDelay(self):
12739
    """Do the actual sleep.
12740

12741
    """
12742
    if self.op.on_master:
12743
      if not utils.TestDelay(self.op.duration):
12744
        raise errors.OpExecError("Error during master delay test")
12745
    if self.op.on_nodes:
12746
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
12747
      for node, node_result in result.items():
12748
        node_result.Raise("Failure during rpc call to node %s" % node)
12749

    
12750
  def Exec(self, feedback_fn):
12751
    """Execute the test delay opcode, with the wanted repetitions.
12752

12753
    """
12754
    if self.op.repeat == 0:
12755
      self._TestDelay()
12756
    else:
12757
      top_value = self.op.repeat - 1
12758
      for i in range(self.op.repeat):
12759
        self.LogInfo("Test delay iteration %d/%d" % (i, top_value))
12760
        self._TestDelay()
12761

    
12762

    
12763
class LUTestJqueue(NoHooksLU):
12764
  """Utility LU to test some aspects of the job queue.
12765

12766
  """
12767
  REQ_BGL = False
12768

    
12769
  # Must be lower than default timeout for WaitForJobChange to see whether it
12770
  # notices changed jobs
12771
  _CLIENT_CONNECT_TIMEOUT = 20.0
12772
  _CLIENT_CONFIRM_TIMEOUT = 60.0
12773

    
12774
  @classmethod
12775
  def _NotifyUsingSocket(cls, cb, errcls):
12776
    """Opens a Unix socket and waits for another program to connect.
12777

12778
    @type cb: callable
12779
    @param cb: Callback to send socket name to client
12780
    @type errcls: class
12781
    @param errcls: Exception class to use for errors
12782

12783
    """
12784
    # Using a temporary directory as there's no easy way to create temporary
12785
    # sockets without writing a custom loop around tempfile.mktemp and
12786
    # socket.bind
12787
    tmpdir = tempfile.mkdtemp()
12788
    try:
12789
      tmpsock = utils.PathJoin(tmpdir, "sock")
12790

    
12791
      logging.debug("Creating temporary socket at %s", tmpsock)
12792
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
12793
      try:
12794
        sock.bind(tmpsock)
12795
        sock.listen(1)
12796

    
12797
        # Send details to client
12798
        cb(tmpsock)
12799

    
12800
        # Wait for client to connect before continuing
12801
        sock.settimeout(cls._CLIENT_CONNECT_TIMEOUT)
12802
        try:
12803
          (conn, _) = sock.accept()
12804
        except socket.error, err:
12805
          raise errcls("Client didn't connect in time (%s)" % err)
12806
      finally:
12807
        sock.close()
12808
    finally:
12809
      # Remove as soon as client is connected
12810
      shutil.rmtree(tmpdir)
12811

    
12812
    # Wait for client to close
12813
    try:
12814
      try:
12815
        # pylint: disable=E1101
12816
        # Instance of '_socketobject' has no ... member
12817
        conn.settimeout(cls._CLIENT_CONFIRM_TIMEOUT)
12818
        conn.recv(1)
12819
      except socket.error, err:
12820
        raise errcls("Client failed to confirm notification (%s)" % err)
12821
    finally:
12822
      conn.close()
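  # Client-side counterpart (a sketch under assumed test-harness behaviour,
  # not part of this module): the client extracts the socket path from the
  # ELOG_JQUEUE_TEST message, connects to unblock accept() above and then
  # confirms the notification:
  #
  #   import socket
  #   sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
  #   sock.connect(sockname)  # lets _NotifyUsingSocket continue
  #   sock.send("x")          # read by conn.recv(1) above
  #   sock.close()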
12823

    
12824
  def _SendNotification(self, test, arg, sockname):
12825
    """Sends a notification to the client.
12826

12827
    @type test: string
12828
    @param test: Test name
12829
    @param arg: Test argument (depends on test)
12830
    @type sockname: string
12831
    @param sockname: Socket path
12832

12833
    """
12834
    self.Log(constants.ELOG_JQUEUE_TEST, (sockname, test, arg))
12835

    
12836
  def _Notify(self, prereq, test, arg):
12837
    """Notifies the client of a test.
12838

12839
    @type prereq: bool
12840
    @param prereq: Whether this is a prereq-phase test
12841
    @type test: string
12842
    @param test: Test name
12843
    @param arg: Test argument (depends on test)
12844

12845
    """
12846
    if prereq:
12847
      errcls = errors.OpPrereqError
12848
    else:
12849
      errcls = errors.OpExecError
12850

    
12851
    return self._NotifyUsingSocket(compat.partial(self._SendNotification,
12852
                                                  test, arg),
12853
                                   errcls)
12854

    
12855
  def CheckArguments(self):
12856
    self.checkargs_calls = getattr(self, "checkargs_calls", 0) + 1
12857
    self.expandnames_calls = 0
12858

    
12859
  def ExpandNames(self):
12860
    checkargs_calls = getattr(self, "checkargs_calls", 0)
12861
    if checkargs_calls < 1:
12862
      raise errors.ProgrammerError("CheckArguments was not called")
12863

    
12864
    self.expandnames_calls += 1
12865

    
12866
    if self.op.notify_waitlock:
12867
      self._Notify(True, constants.JQT_EXPANDNAMES, None)
12868

    
12869
    self.LogInfo("Expanding names")
12870

    
12871
    # Get lock on master node (just to get a lock, not for a particular reason)
12872
    self.needed_locks = {
12873
      locking.LEVEL_NODE: self.cfg.GetMasterNode(),
12874
      }
12875

    
12876
  def Exec(self, feedback_fn):
12877
    if self.expandnames_calls < 1:
12878
      raise errors.ProgrammerError("ExpandNames was not called")
12879

    
12880
    if self.op.notify_exec:
12881
      self._Notify(False, constants.JQT_EXEC, None)
12882

    
12883
    self.LogInfo("Executing")
12884

    
12885
    if self.op.log_messages:
12886
      self._Notify(False, constants.JQT_STARTMSG, len(self.op.log_messages))
12887
      for idx, msg in enumerate(self.op.log_messages):
12888
        self.LogInfo("Sending log message %s", idx + 1)
12889
        feedback_fn(constants.JQT_MSGPREFIX + msg)
12890
        # Report how many test messages have been sent
12891
        self._Notify(False, constants.JQT_LOGMSG, idx + 1)
12892

    
12893
    if self.op.fail:
12894
      raise errors.OpExecError("Opcode failure was requested")
12895

    
12896
    return True
12897

    
12898

    
12899
class IAllocator(object):
12900
  """IAllocator framework.
12901

12902
  An IAllocator instance has four sets of attributes:
12903
    - cfg that is needed to query the cluster
12904
    - input data (all members of the per-mode key list are required)
12905
    - four buffer attributes (in|out_data|text), that represent the
12906
      input (to the external script) in text and data structure format,
12907
      and the output from it, again in two formats
12908
    - the result variables from the script (success, info, result) for
12909
      easy usage
12910

12911
  """
12912
  # pylint: disable=R0902
12913
  # lots of instance attributes
12914

    
12915
  def __init__(self, cfg, rpc, mode, **kwargs):
12916
    self.cfg = cfg
12917
    self.rpc = rpc
12918
    # init buffer variables
12919
    self.in_text = self.out_text = self.in_data = self.out_data = None
12920
    # init all input fields so that pylint is happy
12921
    self.mode = mode
12922
    self.memory = self.disks = self.disk_template = None
12923
    self.os = self.tags = self.nics = self.vcpus = None
12924
    self.hypervisor = None
12925
    self.relocate_from = None
12926
    self.name = None
12927
    self.instances = None
12928
    self.evac_mode = None
12929
    self.target_groups = []
12930
    # computed fields
12931
    self.required_nodes = None
12932
    # init result fields
12933
    self.success = self.info = self.result = None
12934

    
12935
    try:
12936
      (fn, keydata, self._result_check) = self._MODE_DATA[self.mode]
12937
    except KeyError:
12938
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
12939
                                   " IAllocator" % self.mode)
12940

    
12941
    keyset = [n for (n, _) in keydata]
12942

    
12943
    for key in kwargs:
12944
      if key not in keyset:
12945
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
12946
                                     " IAllocator" % key)
12947
      setattr(self, key, kwargs[key])
12948

    
12949
    for key in keyset:
12950
      if key not in kwargs:
12951
        raise errors.ProgrammerError("Missing input parameter '%s' to"
12952
                                     " IAllocator" % key)
12953
    self._BuildInputData(compat.partial(fn, self), keydata)
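  # Example of how a logical unit builds an allocation request (hypothetical
  # values); the keyword arguments must match the key list defined for the
  # chosen mode in _MODE_DATA below:
  #
  #   ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_ALLOC,
  #                    name="inst1.example.com", memory=512, vcpus=1,
  #                    disks=[{constants.IDISK_SIZE: 1024,
  #                            constants.IDISK_MODE: "rw"}],
  #                    disk_template=constants.DT_DRBD8, os="debian-image",
  #                    tags=[], nics=[{}], hypervisor="xen-pvm")
  #   ial.Run(self.op.iallocator)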
12954

    
12955
  def _ComputeClusterData(self):
12956
    """Compute the generic allocator input data.
12957

12958
    This is the data that is independent of the actual operation.
12959

12960
    """
12961
    cfg = self.cfg
12962
    cluster_info = cfg.GetClusterInfo()
12963
    # cluster data
12964
    data = {
12965
      "version": constants.IALLOCATOR_VERSION,
12966
      "cluster_name": cfg.GetClusterName(),
12967
      "cluster_tags": list(cluster_info.GetTags()),
12968
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
12969
      # we don't have job IDs
12970
      }
12971
    ninfo = cfg.GetAllNodesInfo()
12972
    iinfo = cfg.GetAllInstancesInfo().values()
12973
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
12974

    
12975
    # node data
12976
    node_list = [n.name for n in ninfo.values() if n.vm_capable]
12977

    
12978
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
12979
      hypervisor_name = self.hypervisor
12980
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
12981
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
12982
    else:
12983
      hypervisor_name = cluster_info.enabled_hypervisors[0]
12984

    
12985
    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
12986
                                        hypervisor_name)
12987
    node_iinfo = \
12988
      self.rpc.call_all_instances_info(node_list,
12989
                                       cluster_info.enabled_hypervisors)
12990

    
12991
    data["nodegroups"] = self._ComputeNodeGroupData(cfg)
12992

    
12993
    config_ndata = self._ComputeBasicNodeData(ninfo)
12994
    data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
12995
                                                 i_list, config_ndata)
12996
    assert len(data["nodes"]) == len(ninfo), \
12997
        "Incomplete node data computed"
12998

    
12999
    data["instances"] = self._ComputeInstanceData(cluster_info, i_list)
13000

    
13001
    self.in_data = data
13002

    
13003
  @staticmethod
13004
  def _ComputeNodeGroupData(cfg):
13005
    """Compute node groups data.
13006

13007
    """
13008
    ng = dict((guuid, {
13009
      "name": gdata.name,
13010
      "alloc_policy": gdata.alloc_policy,
13011
      })
13012
      for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())
13013

    
13014
    return ng
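  # The resulting mapping has the following shape (hypothetical UUIDs and
  # names):
  #
  #   {"uuid-1": {"name": "default",
  #               "alloc_policy": constants.ALLOC_POLICY_PREFERRED},
  #    "uuid-2": {"name": "spare",
  #               "alloc_policy": constants.ALLOC_POLICY_LAST_RESORT}}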
13015

    
13016
  @staticmethod
13017
  def _ComputeBasicNodeData(node_cfg):
13018
    """Compute global node data.
13019

13020
    @rtype: dict
13021
    @returns: a dict of name: (node dict, node config)
13022

13023
    """
13024
    # fill in static (config-based) values
13025
    node_results = dict((ninfo.name, {
13026
      "tags": list(ninfo.GetTags()),
13027
      "primary_ip": ninfo.primary_ip,
13028
      "secondary_ip": ninfo.secondary_ip,
13029
      "offline": ninfo.offline,
13030
      "drained": ninfo.drained,
13031
      "master_candidate": ninfo.master_candidate,
13032
      "group": ninfo.group,
13033
      "master_capable": ninfo.master_capable,
13034
      "vm_capable": ninfo.vm_capable,
13035
      })
13036
      for ninfo in node_cfg.values())
13037

    
13038
    return node_results
13039

    
13040
  @staticmethod
13041
  def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
13042
                              node_results):
13043
    """Compute global node data.
13044

13045
    @param node_results: the basic node structures as filled from the config
13046

13047
    """
13048
    # make a copy of the current dict
13049
    node_results = dict(node_results)
13050
    for nname, nresult in node_data.items():
13051
      assert nname in node_results, "Missing basic data for node %s" % nname
13052
      ninfo = node_cfg[nname]
13053

    
13054
      if not (ninfo.offline or ninfo.drained):
13055
        nresult.Raise("Can't get data for node %s" % nname)
13056
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
13057
                                nname)
13058
        remote_info = nresult.payload
13059

    
13060
        for attr in ["memory_total", "memory_free", "memory_dom0",
13061
                     "vg_size", "vg_free", "cpu_total"]:
13062
          if attr not in remote_info:
13063
            raise errors.OpExecError("Node '%s' didn't return attribute"
13064
                                     " '%s'" % (nname, attr))
13065
          if not isinstance(remote_info[attr], int):
13066
            raise errors.OpExecError("Node '%s' returned invalid value"
13067
                                     " for '%s': %s" %
13068
                                     (nname, attr, remote_info[attr]))
13069
        # compute memory used by primary instances
13070
        i_p_mem = i_p_up_mem = 0
13071
        for iinfo, beinfo in i_list:
13072
          if iinfo.primary_node == nname:
13073
            i_p_mem += beinfo[constants.BE_MEMORY]
13074
            if iinfo.name not in node_iinfo[nname].payload:
13075
              i_used_mem = 0
13076
            else:
13077
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"])
13078
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
13079
            remote_info["memory_free"] -= max(0, i_mem_diff)
13080

    
13081
            if iinfo.admin_up:
13082
              i_p_up_mem += beinfo[constants.BE_MEMORY]
13083

    
13084
        # assemble the dynamic node information
13085
        pnr_dyn = {
13086
          "total_memory": remote_info["memory_total"],
13087
          "reserved_memory": remote_info["memory_dom0"],
13088
          "free_memory": remote_info["memory_free"],
13089
          "total_disk": remote_info["vg_size"],
13090
          "free_disk": remote_info["vg_free"],
13091
          "total_cpus": remote_info["cpu_total"],
13092
          "i_pri_memory": i_p_mem,
13093
          "i_pri_up_memory": i_p_up_mem,
13094
          }
13095
        pnr_dyn.update(node_results[nname])
13096
        node_results[nname] = pnr_dyn
13097

    
13098
    return node_results
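  # Worked example (hypothetical numbers) for the free-memory correction
  # above: a node reports memory_free == 2048 and hosts one primary instance
  # whose beparams memory is 512 while it currently uses only 384.  The
  # difference of 128 is subtracted, so "free_memory" is reported as 1920;
  # memory a running instance may still claim is not advertised as free.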
13099

    
13100
  @staticmethod
13101
  def _ComputeInstanceData(cluster_info, i_list):
13102
    """Compute global instance data.
13103

13104
    """
13105
    instance_data = {}
13106
    for iinfo, beinfo in i_list:
13107
      nic_data = []
13108
      for nic in iinfo.nics:
13109
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
13110
        nic_dict = {
13111
          "mac": nic.mac,
13112
          "ip": nic.ip,
13113
          "mode": filled_params[constants.NIC_MODE],
13114
          "link": filled_params[constants.NIC_LINK],
13115
          }
13116
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
13117
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
13118
        nic_data.append(nic_dict)
13119
      pir = {
13120
        "tags": list(iinfo.GetTags()),
13121
        "admin_up": iinfo.admin_up,
13122
        "vcpus": beinfo[constants.BE_VCPUS],
13123
        "memory": beinfo[constants.BE_MEMORY],
13124
        "os": iinfo.os,
13125
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
13126
        "nics": nic_data,
13127
        "disks": [{constants.IDISK_SIZE: dsk.size,
13128
                   constants.IDISK_MODE: dsk.mode}
13129
                  for dsk in iinfo.disks],
13130
        "disk_template": iinfo.disk_template,
13131
        "hypervisor": iinfo.hypervisor,
13132
        }
13133
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
13134
                                                 pir["disks"])
13135
      instance_data[iinfo.name] = pir
13136

    
13137
    return instance_data
13138

    
13139
  def _AddNewInstance(self):
13140
    """Add new instance data to allocator structure.
13141

13142
    This in combination with _ComputeClusterData will create the
13143
    correct structure needed as input for the allocator.
13144

13145
    The checks for the completeness of the opcode must have already been
13146
    done.
13147

13148
    """
13149
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
13150

    
13151
    if self.disk_template in constants.DTS_INT_MIRROR:
13152
      self.required_nodes = 2
13153
    else:
13154
      self.required_nodes = 1
13155

    
13156
    request = {
13157
      "name": self.name,
13158
      "disk_template": self.disk_template,
13159
      "tags": self.tags,
13160
      "os": self.os,
13161
      "vcpus": self.vcpus,
13162
      "memory": self.memory,
13163
      "disks": self.disks,
13164
      "disk_space_total": disk_space,
13165
      "nics": self.nics,
13166
      "required_nodes": self.required_nodes,
13167
      "hypervisor": self.hypervisor,
13168
      }
13169

    
13170
    return request
13171

    
13172
  def _AddRelocateInstance(self):
13173
    """Add relocate instance data to allocator structure.
13174

13175
    This in combination with _ComputeClusterData will create the
13176
    correct structure needed as input for the allocator.
13177

13178
    The checks for the completeness of the opcode must have already been
13179
    done.
13180

13181
    """
13182
    instance = self.cfg.GetInstanceInfo(self.name)
13183
    if instance is None:
13184
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
13185
                                   " IAllocator" % self.name)
13186

    
13187
    if instance.disk_template not in constants.DTS_MIRRORED:
13188
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
13189
                                 errors.ECODE_INVAL)
13190

    
13191
    if instance.disk_template in constants.DTS_INT_MIRROR and \
13192
        len(instance.secondary_nodes) != 1:
13193
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
13194
                                 errors.ECODE_STATE)
13195

    
13196
    self.required_nodes = 1
13197
    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks]
13198
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
13199

    
13200
    request = {
13201
      "name": self.name,
13202
      "disk_space_total": disk_space,
13203
      "required_nodes": self.required_nodes,
13204
      "relocate_from": self.relocate_from,
13205
      }
13206
    return request
13207

    
13208
  def _AddNodeEvacuate(self):
13209
    """Get data for node-evacuate requests.
13210

13211
    """
13212
    return {
13213
      "instances": self.instances,
13214
      "evac_mode": self.evac_mode,
13215
      }
13216

    
13217
  def _AddChangeGroup(self):
13218
    """Get data for node-evacuate requests.
13219

13220
    """
13221
    return {
13222
      "instances": self.instances,
13223
      "target_groups": self.target_groups,
13224
      }
13225

    
13226
  def _BuildInputData(self, fn, keydata):
13227
    """Build input data structures.
13228

13229
    """
13230
    self._ComputeClusterData()
13231

    
13232
    request = fn()
13233
    request["type"] = self.mode
13234
    for keyname, keytype in keydata:
13235
      if keyname not in request:
13236
        raise errors.ProgrammerError("Request parameter %s is missing" %
13237
                                     keyname)
13238
      val = request[keyname]
13239
      if not keytype(val):
13240
        raise errors.ProgrammerError("Request parameter %s doesn't pass"
13241
                                     " validation, value %s, expected"
13242
                                     " type %s" % (keyname, val, keytype))
13243
    self.in_data["request"] = request
13244

    
13245
    self.in_text = serializer.Dump(self.in_data)
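  # The serialized request handed to the iallocator script therefore has the
  # following top-level shape (abridged, hypothetical values):
  #
  #   {"version": constants.IALLOCATOR_VERSION,
  #    "cluster_name": "cluster.example.com",
  #    "cluster_tags": [],
  #    "enabled_hypervisors": ["xen-pvm"],
  #    "nodegroups": {...}, "nodes": {...}, "instances": {...},
  #    "request": {"type": constants.IALLOCATOR_MODE_ALLOC, ...}}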
13246

    
13247
  _STRING_LIST = ht.TListOf(ht.TString)
13248
  _JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
13249
     # pylint: disable=E1101
13250
     # Class '...' has no 'OP_ID' member
13251
     "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
13252
                          opcodes.OpInstanceMigrate.OP_ID,
13253
                          opcodes.OpInstanceReplaceDisks.OP_ID])
13254
     })))
13255

    
13256
  _NEVAC_MOVED = \
13257
    ht.TListOf(ht.TAnd(ht.TIsLength(3),
13258
                       ht.TItems([ht.TNonEmptyString,
13259
                                  ht.TNonEmptyString,
13260
                                  ht.TListOf(ht.TNonEmptyString),
13261
                                 ])))
13262
  _NEVAC_FAILED = \
13263
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
13264
                       ht.TItems([ht.TNonEmptyString,
13265
                                  ht.TMaybeString,
13266
                                 ])))
13267
  _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
13268
                          ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
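  # A reply passing the _NEVAC_RESULT check would look like the following
  # (hypothetical data; the three elements are moved instances, failed
  # instances and the jobs to be submitted):
  #
  #   [[["inst1", "target-group-uuid", ["node3", "node4"]]],
  #    [["inst2", "instance is not mirrored"]],
  #    [[{"OP_ID": opcodes.OpInstanceMigrate.OP_ID, ...}]]]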
13269

    
13270
  _MODE_DATA = {
13271
    constants.IALLOCATOR_MODE_ALLOC:
13272
      (_AddNewInstance,
13273
       [
13274
        ("name", ht.TString),
13275
        ("memory", ht.TInt),
13276
        ("disks", ht.TListOf(ht.TDict)),
13277
        ("disk_template", ht.TString),
13278
        ("os", ht.TString),
13279
        ("tags", _STRING_LIST),
13280
        ("nics", ht.TListOf(ht.TDict)),
13281
        ("vcpus", ht.TInt),
13282
        ("hypervisor", ht.TString),
13283
        ], ht.TList),
13284
    constants.IALLOCATOR_MODE_RELOC:
13285
      (_AddRelocateInstance,
13286
       [("name", ht.TString), ("relocate_from", _STRING_LIST)],
13287
       ht.TList),
13288
     constants.IALLOCATOR_MODE_NODE_EVAC:
13289
      (_AddNodeEvacuate, [
13290
        ("instances", _STRING_LIST),
13291
        ("evac_mode", ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)),
13292
        ], _NEVAC_RESULT),
13293
     constants.IALLOCATOR_MODE_CHG_GROUP:
13294
      (_AddChangeGroup, [
13295
        ("instances", _STRING_LIST),
13296
        ("target_groups", _STRING_LIST),
13297
        ], _NEVAC_RESULT),
13298
    }
13299

    
13300
  def Run(self, name, validate=True, call_fn=None):
13301
    """Run an instance allocator and return the results.
13302

13303
    """
13304
    if call_fn is None:
13305
      call_fn = self.rpc.call_iallocator_runner
13306

    
13307
    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
13308
    result.Raise("Failure while running the iallocator script")
13309

    
13310
    self.out_text = result.payload
13311
    if validate:
13312
      self._ValidateResult()
13313

    
13314
  def _ValidateResult(self):
13315
    """Process the allocator results.
13316

13317
    This will process and if successful save the result in
13318
    self.out_data and the other parameters.
13319

13320
    """
13321
    try:
13322
      rdict = serializer.Load(self.out_text)
13323
    except Exception, err:
13324
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
13325

    
13326
    if not isinstance(rdict, dict):
13327
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
13328

    
13329
    # TODO: remove backwards compatibility in later versions
13330
    if "nodes" in rdict and "result" not in rdict:
13331
      rdict["result"] = rdict["nodes"]
13332
      del rdict["nodes"]
13333

    
13334
    for key in "success", "info", "result":
13335
      if key not in rdict:
13336
        raise errors.OpExecError("Can't parse iallocator results:"
13337
                                 " missing key '%s'" % key)
13338
      setattr(self, key, rdict[key])
13339

    
13340
    if not self._result_check(self.result):
13341
      raise errors.OpExecError("Iallocator returned invalid result,"
13342
                               " expected %s, got %s" %
13343
                               (self._result_check, self.result),
13344
                               errors.ECODE_INVAL)
13345

    
13346
    if self.mode == constants.IALLOCATOR_MODE_RELOC:
13347
      assert self.relocate_from is not None
13348
      assert self.required_nodes == 1
13349

    
13350
      node2group = dict((name, ndata["group"])
13351
                        for (name, ndata) in self.in_data["nodes"].items())
13352

    
13353
      fn = compat.partial(self._NodesToGroups, node2group,
13354
                          self.in_data["nodegroups"])
13355

    
13356
      instance = self.cfg.GetInstanceInfo(self.name)
13357
      request_groups = fn(self.relocate_from + [instance.primary_node])
13358
      result_groups = fn(rdict["result"] + [instance.primary_node])
13359

    
13360
      if self.success and not set(result_groups).issubset(request_groups):
13361
        raise errors.OpExecError("Groups of nodes returned by iallocator (%s)"
13362
                                 " differ from original groups (%s)" %
13363
                                 (utils.CommaJoin(result_groups),
13364
                                  utils.CommaJoin(request_groups)))
13365

    
13366
    elif self.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
13367
      assert self.evac_mode in constants.IALLOCATOR_NEVAC_MODES
13368

    
13369
    self.out_data = rdict
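  # A minimal well-formed reply (hypothetical content), once parsed, would be
  #
  #   {"success": True, "info": "allocation successful",
  #    "result": ["node3.example.com"]}
  #
  # "success", "info" and "result" are mandatory; replies from older scripts
  # that still use "nodes" instead of "result" are rewritten above.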
13370

    
13371
  @staticmethod
13372
  def _NodesToGroups(node2group, groups, nodes):
13373
    """Returns a list of unique group names for a list of nodes.
13374

13375
    @type node2group: dict
13376
    @param node2group: Map from node name to group UUID
13377
    @type groups: dict
13378
    @param groups: Group information
13379
    @type nodes: list
13380
    @param nodes: Node names
13381

13382
    """
13383
    result = set()
13384

    
13385
    for node in nodes:
13386
      try:
13387
        group_uuid = node2group[node]
13388
      except KeyError:
13389
        # Ignore unknown node
13390
        pass
13391
      else:
13392
        try:
13393
          group = groups[group_uuid]
13394
        except KeyError:
13395
          # Can't find group, let's use UUID
13396
          group_name = group_uuid
13397
        else:
13398
          group_name = group["name"]
13399

    
13400
        result.add(group_name)
13401

    
13402
    return sorted(result)
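  # Example (hypothetical data): with node2group == {"node1": "uuid1"} and
  # groups == {"uuid1": {"name": "group1"}}, the call
  #
  #   IAllocator._NodesToGroups(node2group, groups, ["node1", "ghost"])
  #
  # returns ["group1"]; the unknown node "ghost" is ignored, and a known
  # node whose group is missing from C{groups} would be reported by UUID.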
13403

    
13404

    
13405
class LUTestAllocator(NoHooksLU):
13406
  """Run allocator tests.
13407

13408
  This LU runs the allocator tests
13409

13410
  """
13411
  def CheckPrereq(self):
13412
    """Check prerequisites.
13413

13414
    This checks the opcode parameters depending on the test direction and mode.
13415

13416
    """
13417
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
13418
      for attr in ["memory", "disks", "disk_template",
13419
                   "os", "tags", "nics", "vcpus"]:
13420
        if not hasattr(self.op, attr):
13421
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
13422
                                     attr, errors.ECODE_INVAL)
13423
      iname = self.cfg.ExpandInstanceName(self.op.name)
13424
      if iname is not None:
13425
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
13426
                                   iname, errors.ECODE_EXISTS)
13427
      if not isinstance(self.op.nics, list):
13428
        raise errors.OpPrereqError("Invalid parameter 'nics'",
13429
                                   errors.ECODE_INVAL)
13430
      if not isinstance(self.op.disks, list):
13431
        raise errors.OpPrereqError("Invalid parameter 'disks'",
13432
                                   errors.ECODE_INVAL)
13433
      for row in self.op.disks:
13434
        if (not isinstance(row, dict) or
13435
            constants.IDISK_SIZE not in row or
13436
            not isinstance(row[constants.IDISK_SIZE], int) or
13437
            constants.IDISK_MODE not in row or
13438
            row[constants.IDISK_MODE] not in constants.DISK_ACCESS_SET):
13439
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
13440
                                     " parameter", errors.ECODE_INVAL)
13441
      if self.op.hypervisor is None:
13442
        self.op.hypervisor = self.cfg.GetHypervisorType()
13443
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
13444
      fname = _ExpandInstanceName(self.cfg, self.op.name)
13445
      self.op.name = fname
13446
      self.relocate_from = \
13447
          list(self.cfg.GetInstanceInfo(fname).secondary_nodes)
13448
    elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
13449
                          constants.IALLOCATOR_MODE_NODE_EVAC):
13450
      if not self.op.instances:
13451
        raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
13452
      self.op.instances = _GetWantedInstances(self, self.op.instances)
13453
    else:
13454
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
13455
                                 self.op.mode, errors.ECODE_INVAL)
13456

    
13457
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
13458
      if self.op.allocator is None:
13459
        raise errors.OpPrereqError("Missing allocator name",
13460
                                   errors.ECODE_INVAL)
13461
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
13462
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
13463
                                 self.op.direction, errors.ECODE_INVAL)
13464

    
13465
  def Exec(self, feedback_fn):
13466
    """Run the allocator test.
13467

13468
    """
13469
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
13470
      ial = IAllocator(self.cfg, self.rpc,
13471
                       mode=self.op.mode,
13472
                       name=self.op.name,
13473
                       memory=self.op.memory,
13474
                       disks=self.op.disks,
13475
                       disk_template=self.op.disk_template,
13476
                       os=self.op.os,
13477
                       tags=self.op.tags,
13478
                       nics=self.op.nics,
13479
                       vcpus=self.op.vcpus,
13480
                       hypervisor=self.op.hypervisor,
13481
                       )
13482
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
13483
      ial = IAllocator(self.cfg, self.rpc,
13484
                       mode=self.op.mode,
13485
                       name=self.op.name,
13486
                       relocate_from=list(self.relocate_from),
13487
                       )
13488
    elif self.op.mode == constants.IALLOCATOR_MODE_CHG_GROUP:
13489
      ial = IAllocator(self.cfg, self.rpc,
13490
                       mode=self.op.mode,
13491
                       instances=self.op.instances,
13492
                       target_groups=self.op.target_groups)
13493
    elif self.op.mode == constants.IALLOCATOR_MODE_NODE_EVAC:
13494
      ial = IAllocator(self.cfg, self.rpc,
13495
                       mode=self.op.mode,
13496
                       instances=self.op.instances,
13497
                       evac_mode=self.op.evac_mode)
13498
    else:
13499
      raise errors.ProgrammerError("Uncatched mode %s in"
13500
                                   " LUTestAllocator.Exec", self.op.mode)
13501

    
13502
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
13503
      result = ial.in_text
13504
    else:
13505
      ial.Run(self.op.allocator, validate=False)
13506
      result = ial.out_text
13507
    return result
13508

    
13509

    
13510
#: Query type implementations
13511
_QUERY_IMPL = {
13512
  constants.QR_INSTANCE: _InstanceQuery,
13513
  constants.QR_NODE: _NodeQuery,
13514
  constants.QR_GROUP: _GroupQuery,
13515
  constants.QR_OS: _OsQuery,
13516
  }
13517

    
13518
assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
13519

    
13520

    
13521
def _GetQueryImplementation(name):
13522
  """Returns the implemtnation for a query type.
13523

13524
  @param name: Query type, must be one of L{constants.QR_VIA_OP}
13525

13526
  """
13527
  try:
13528
    return _QUERY_IMPL[name]
13529
  except KeyError:
13530
    raise errors.OpPrereqError("Unknown query resource '%s'" % name,
13531
                               errors.ECODE_INVAL)
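# Example (illustrative): _GetQueryImplementation(constants.QR_GROUP) returns
# the _GroupQuery class defined above, while an unknown resource name raises
# OpPrereqError instead of leaking a KeyError to the caller.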